| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (1 class: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def _trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds):
'Record the event to the commit log after the model commit.\n\n Note that this extends the superclass method.\n\n Args:\n committer_id: str. The user_id of the user who committed the\n change.\n commit_type: str. The type of commit. Possible values are in\n core.storage.base_models.COMMIT_TYPE_CHOICES.\n commit_message: str. The commit description message.\n commit_cmds: list(dict). A list of commands, describing changes\n made in this model, should give sufficient information to\n reconstruct the commit. Each dict always contains:\n cmd: str. Unique command.\n and then additional arguments for that command.\n '
base_models.VersionedModel._trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds)
if (commit_type not in ['create', 'delete']):
exp_models.ExplorationCommitLogEntryModel(id=('rights-%s-%s' % (self.id, self.version)), user_id=committer_id, exploration_id=self.id, commit_type=commit_type, commit_message=commit_message, commit_cmds=commit_cmds, version=None, post_commit_status=self.status, post_commit_community_owned=self.community_owned, post_commit_is_private=(self.status == constants.ACTIVITY_STATUS_PRIVATE)).put()
| 8,086,757,080,571,136,000
|
Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
|
core/domain/activity_jobs_one_off_test.py
|
_trusted_commit
|
AnanyaNegi/oppia
|
python
|
def _trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds):
'Record the event to the commit log after the model commit.\n\n Note that this extends the superclass method.\n\n Args:\n committer_id: str. The user_id of the user who committed the\n change.\n commit_type: str. The type of commit. Possible values are in\n core.storage.base_models.COMMIT_TYPE_CHOICES.\n commit_message: str. The commit description message.\n commit_cmds: list(dict). A list of commands, describing changes\n made in this model, should give sufficient information to\n reconstruct the commit. Each dict always contains:\n cmd: str. Unique command.\n and then additional arguments for that command.\n '
base_models.VersionedModel._trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds)
if (commit_type not in ['create', 'delete']):
exp_models.ExplorationCommitLogEntryModel(id=('rights-%s-%s' % (self.id, self.version)), user_id=committer_id, exploration_id=self.id, commit_type=commit_type, commit_message=commit_message, commit_cmds=commit_cmds, version=None, post_commit_status=self.status, post_commit_community_owned=self.community_owned, post_commit_is_private=(self.status == constants.ACTIVITY_STATUS_PRIVATE)).put()
|
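The docstring above pins down the shape of commit_cmds but gives no sample; here is a minimal hedged illustration of that shape (the command names and argument keys are hypothetical, not taken from this row):

    # Each dict carries a 'cmd' key naming the command, plus that
    # command's own arguments (hypothetical values for illustration).
    commit_cmds = [
        {'cmd': 'change_role', 'assignee_id': 'user_1', 'new_role': 'editor'},
        {'cmd': 'release_ownership'},
    ]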
def _trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds):
'Record the event to the commit log after the model commit.\n\n Note that this extends the superclass method.\n\n Args:\n committer_id: str. The user_id of the user who committed the\n change.\n commit_type: str. The type of commit. Possible values are in\n core.storage.base_models.COMMIT_TYPE_CHOICES.\n commit_message: str. The commit description message.\n commit_cmds: list(dict). A list of commands, describing changes\n made in this model, which should give sufficient information to\n reconstruct the commit. Each dict always contains:\n cmd: str. Unique command.\n and then additional arguments for that command.\n '
base_models.VersionedModel._trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds)
topic_rights = MockTopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
topic_models.TopicCommitLogEntryModel(id=('rights-%s-%s' % (self.id, self.version)), user_id=committer_id, topic_id=self.id, commit_type=commit_type, commit_message=commit_message, commit_cmds=commit_cmds, version=None, post_commit_status=status, post_commit_community_owned=False, post_commit_is_private=(not topic_rights.topic_is_published)).put()
| 3,609,551,769,068,114,400
|
Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
|
core/domain/activity_jobs_one_off_test.py
|
_trusted_commit
|
AnanyaNegi/oppia
|
python
|
def _trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds):
'Record the event to the commit log after the model commit.\n\n Note that this extends the superclass method.\n\n Args:\n committer_id: str. The user_id of the user who committed the\n change.\n commit_type: str. The type of commit. Possible values are in\n core.storage.base_models.COMMIT_TYPE_CHOICES.\n commit_message: str. The commit description message.\n commit_cmds: list(dict). A list of commands, describing changes\n made in this model, which should give sufficient information to\n reconstruct the commit. Each dict always contains:\n cmd: str. Unique command.\n and then additional arguments for that command.\n '
base_models.VersionedModel._trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds)
topic_rights = MockTopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
topic_models.TopicCommitLogEntryModel(id=('rights-%s-%s' % (self.id, self.version)), user_id=committer_id, topic_id=self.id, commit_type=commit_type, commit_message=commit_message, commit_cmds=commit_cmds, version=None, post_commit_status=status, post_commit_community_owned=False, post_commit_is_private=(not topic_rights.topic_is_published)).put()
|
def _run_one_off_job(self):
'Runs the one-off MapReduce job.'
job_id = activity_jobs_one_off.AddContentUserIdsContentJob.create_new()
activity_jobs_one_off.AddContentUserIdsContentJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.AddContentUserIdsContentJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return [[key, (sorted(values) if isinstance(values, list) else values)] for (key, values) in eval_output]
| -5,408,814,941,831,667,000
|
Runs the one-off MapReduce job.
|
core/domain/activity_jobs_one_off_test.py
|
_run_one_off_job
|
AnanyaNegi/oppia
|
python
|
def _run_one_off_job(self):
job_id = activity_jobs_one_off.AddContentUserIdsContentJob.create_new()
activity_jobs_one_off.AddContentUserIdsContentJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.AddContentUserIdsContentJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return [[key, (sorted(values) if isinstance(values, list) else values)] for (key, values) in eval_output]
|
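All four `_run_one_off_job` helpers in this file share one pattern: enqueue the job, flush the MapReduce task queue, then parse the stringified `[key, values]` output with `ast.literal_eval`, sorting list values for deterministic assertions. A self-contained sketch of just the parsing step (the sample output strings are invented for illustration):

    import ast

    stringified_output = ["['SUCCESS-ADDED', ['id_b', 'id_a']]", "['FAILED', 2]"]
    eval_output = [ast.literal_eval(item) for item in stringified_output]
    # sort list values so comparisons do not depend on map/reduce ordering
    parsed = [
        [key, sorted(values) if isinstance(values, list) else values]
        for key, values in eval_output
    ]
    print(parsed)  # [['SUCCESS-ADDED', ['id_a', 'id_b']], ['FAILED', 2]]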
def _run_one_off_job(self):
'Runs the one-off MapReduce job.'
job_id = activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.create_new()
activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return [[key, (sorted(values) if isinstance(values, list) else values)] for (key, values) in eval_output]
| 5,209,690,579,846,129,000
|
Runs the one-off MapReduce job.
|
core/domain/activity_jobs_one_off_test.py
|
_run_one_off_job
|
AnanyaNegi/oppia
|
python
|
def _run_one_off_job(self):
job_id = activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.create_new()
activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.AddCommitCmdsUserIdsMetadataJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return [[key, (sorted(values) if isinstance(values, list) else values)] for (key, values) in eval_output]
|
def _run_one_off_job(self):
'Runs the one-off MapReduce job.'
job_id = activity_jobs_one_off.AuditSnapshotMetadataModelsJob.create_new()
activity_jobs_one_off.AuditSnapshotMetadataModelsJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.AuditSnapshotMetadataModelsJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return [[key, (sorted(values) if isinstance(values, list) else values)] for (key, values) in eval_output]
| -3,907,230,091,904,724,000
|
Runs the one-off MapReduce job.
|
core/domain/activity_jobs_one_off_test.py
|
_run_one_off_job
|
AnanyaNegi/oppia
|
python
|
def _run_one_off_job(self):
job_id = activity_jobs_one_off.AuditSnapshotMetadataModelsJob.create_new()
activity_jobs_one_off.AuditSnapshotMetadataModelsJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.AuditSnapshotMetadataModelsJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return [[key, (sorted(values) if isinstance(values, list) else values)] for (key, values) in eval_output]
|
def _run_one_off_job(self):
'Runs the one-off MapReduce job.'
job_class = activity_jobs_one_off.ValidateSnapshotMetadataModelsJob
job_id = job_class.create_new()
activity_jobs_one_off.ValidateSnapshotMetadataModelsJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.ValidateSnapshotMetadataModelsJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return eval_output
| 9,207,150,648,411,642,000
|
Runs the one-off MapReduce job.
|
core/domain/activity_jobs_one_off_test.py
|
_run_one_off_job
|
AnanyaNegi/oppia
|
python
|
def _run_one_off_job(self):
job_class = activity_jobs_one_off.ValidateSnapshotMetadataModelsJob
job_id = job_class.create_new()
activity_jobs_one_off.ValidateSnapshotMetadataModelsJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.ValidateSnapshotMetadataModelsJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return eval_output
|
def clean_str(string):
'\n Tokenization/string cleaning for all datasets except for SST.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n '
string = re.sub("[^A-Za-z0-9(),!?\\'\\`]", ' ', string)
string = re.sub("\\'s", " 's", string)
string = re.sub("\\'ve", " 've", string)
string = re.sub("n\\'t", " n't", string)
string = re.sub("\\'re", " 're", string)
string = re.sub("\\'d", " 'd", string)
string = re.sub("\\'ll", " 'll", string)
string = re.sub(',', ' , ', string)
string = re.sub('!', ' ! ', string)
string = re.sub('\\(', ' \\( ', string)
string = re.sub('\\)', ' \\) ', string)
string = re.sub('\\?', ' \\? ', string)
string = re.sub('\\s{2,}', ' ', string)
return string.strip().lower()
| 6,380,898,887,572,334,000
|
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
|
data_helpers.py
|
clean_str
|
pychuang/ist557-data-mining-cnn
|
python
|
def clean_str(string):
'\n Tokenization/string cleaning for all datasets except for SST.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n '
string = re.sub("[^A-Za-z0-9(),!?\\'\\`]", ' ', string)
string = re.sub("\\'s", " 's", string)
string = re.sub("\\'ve", " 've", string)
string = re.sub("n\\'t", " n't", string)
string = re.sub("\\'re", " 're", string)
string = re.sub("\\'d", " 'd", string)
string = re.sub("\\'ll", " 'll", string)
string = re.sub(',', ' , ', string)
string = re.sub('!', ' ! ', string)
string = re.sub('\\(', ' \\( ', string)
string = re.sub('\\)', ' \\) ', string)
string = re.sub('\\?', ' \\? ', string)
string = re.sub('\\s{2,}', ' ', string)
return string.strip().lower()
|
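To make the chain of substitutions in `clean_str` concrete, a quick usage check (assuming the function above is in scope; the escaped parentheses in the output are literal, since the replacement strings contain backslashes):

    import re  # clean_str depends on re

    print(clean_str("He isn't here, (really)!"))
    # he is n't here , \( really \) !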
def load_data_and_labels(data_file):
'\n Loads MR polarity data from files, splits the data into words and generates labels.\n Returns split sentences and labels.\n '
datapoints = load_datapoints(data_file)
x_text = extract_phrases_in_datapoints(datapoints)
y = [int(dp.Sentiment) for dp in datapoints]
def one_hot(i):
return ((([0] * i) + [1]) + ([0] * (4 - i)))
y_vector = []
for sentiment in y:
y_vector.append(one_hot(sentiment))
return [x_text, np.array(y_vector)]
| 4,745,854,949,447,349,000
|
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
|
data_helpers.py
|
load_data_and_labels
|
pychuang/ist557-data-mining-cnn
|
python
|
def load_data_and_labels(data_file):
'\n Loads MR polarity data from files, splits the data into words and generates labels.\n Returns split sentences and labels.\n '
datapoints = load_datapoints(data_file)
x_text = extract_phrases_in_datapoints(datapoints)
y = [int(dp.Sentiment) for dp in datapoints]
def one_hot(i):
return ((([0] * i) + [1]) + ([0] * (4 - i)))
y_vector = []
for sentiment in y:
y_vector.append(one_hot(sentiment))
return [x_text, np.array(y_vector)]
|
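The nested `one_hot` helper above encodes a sentiment label in 0..4 as a length-5 one-hot vector; a standalone restatement for a quick check:

    def one_hot(i):
        # place a single 1 at position i of a length-5 vector
        return [0] * i + [1] + [0] * (4 - i)

    print(one_hot(0))  # [1, 0, 0, 0, 0]
    print(one_hot(3))  # [0, 0, 0, 1, 0]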
def batch_iter(data, batch_size, num_epochs, shuffle=True):
'\n Generates a batch iterator for a dataset.\n '
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = (int((len(data) / batch_size)) + 1)
for epoch in range(num_epochs):
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = (batch_num * batch_size)
end_index = min(((batch_num + 1) * batch_size), data_size)
(yield shuffled_data[start_index:end_index])
| 6,353,081,854,038,388,000
|
Generates a batch iterator for a dataset.
|
data_helpers.py
|
batch_iter
|
pychuang/ist557-data-mining-cnn
|
python
|
def batch_iter(data, batch_size, num_epochs, shuffle=True):
'\n \n '
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = (int((len(data) / batch_size)) + 1)
for epoch in range(num_epochs):
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = (batch_num * batch_size)
end_index = min(((batch_num + 1) * batch_size), data_size)
(yield shuffled_data[start_index:end_index])
|
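A small usage sketch for `batch_iter` (assuming the generator above is in scope). Note that `num_batches_per_epoch` is `int(len(data) / batch_size) + 1`, so when the data size is an exact multiple of the batch size the final slice is empty; with a non-multiple it yields a short final batch, as here:

    import numpy as np  # batch_iter depends on numpy

    data = list(zip(range(10), range(10)))  # 10 rows of 2 columns
    for batch in batch_iter(data, batch_size=4, num_epochs=1, shuffle=False):
        print(batch.shape)
    # (4, 2)
    # (4, 2)
    # (2, 2)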
def test_tabs(self):
'Test tabs functionality'
text = '\t\tHello'
expected = '\t\t'
got = get_leading_whitespace(text)
assert (expected == got)
| -2,184,680,040,339,133,400
|
Test tabs functionality
|
tests/test_utils.py
|
test_tabs
|
lukerm48/dyc
|
python
|
def test_tabs(self):
text = '\t\tHello'
expected = '\t\t'
got = get_leading_whitespace(text)
assert (expected == got)
|
def test_whitespace(self):
'Test whitespace functionality'
space = ' '
text = '{space}Such a long whitespace'.format(space=space)
expected = space
got = get_leading_whitespace(text)
assert (expected == got)
| 4,380,228,456,149,272,600
|
Test whitespace functionality
|
tests/test_utils.py
|
test_whitespace
|
lukerm48/dyc
|
python
|
def test_whitespace(self):
space = ' '
text = '{space}Such a long whitespace'.format(space=space)
expected = space
got = get_leading_whitespace(text)
assert (expected == got)
|
def test_valid_comments(self):
'Testing valid comments'
text = '# Hello World'
assert (is_comment(text, ['#']) == True)
| -3,741,295,515,058,425,000
|
Testing valid comments
|
tests/test_utils.py
|
test_valid_comments
|
lukerm48/dyc
|
python
|
def test_valid_comments(self):
text = '# Hello World'
assert (is_comment(text, ['#']) == True)
|
def test_invalid_comments(self):
'Testing invalid comments'
text = '# Hello World'
assert (is_comment(text, ['//']) == False)
| -2,414,815,970,119,370,000
|
Testing invalid comments
|
tests/test_utils.py
|
test_invalid_comments
|
lukerm48/dyc
|
python
|
def test_invalid_comments(self):
text = '# Hello World'
assert (is_comment(text, ['//']) == False)
|
def load_preprocessed_data(self):
'\n raw_data is a list that has three components\n component1) trajectory data for training\n component2) trajectory data for validation and visualization\n '
f = open(self.dataset_path, 'rb')
raw_data = pickle.load(f)
f.close()
counter = 0
self.train_data = []
for data in raw_data[0]:
scaled_data = np.copy(data)
self.train_data.append(scaled_data)
counter += int((len(scaled_data) - self.seq_length))
self.num_batches = int((counter / self.batch_size))
self.valid_data = []
for data in raw_data[1]:
scaled_data = np.copy(data)
self.valid_data.append(scaled_data)
self.map = []
for data in raw_data[2]:
self.map.append(data)
self.map_info = []
for data in raw_data[3]:
self.map_info.append(data)
| -470,723,059,986,581,900
|
raw_data is a list that has three components
component1) trajectory data for training
component2) trajectory data for validation and visualization
|
kitti_utils.py
|
load_preprocessed_data
|
d1024choi/trajpred_irl
|
python
|
def load_preprocessed_data(self):
'\n raw_data is a list that has three components\n component1) trajectory data for training\n component2) trajectory data for validation and visualization\n '
f = open(self.dataset_path, 'rb')
raw_data = pickle.load(f)
f.close()
counter = 0
self.train_data = []
for data in raw_data[0]:
scaled_data = np.copy(data)
self.train_data.append(scaled_data)
counter += int((len(scaled_data) - self.seq_length))
self.num_batches = int((counter / self.batch_size))
self.valid_data = []
for data in raw_data[1]:
scaled_data = np.copy(data)
self.valid_data.append(scaled_data)
self.map = []
for data in raw_data[2]:
self.map.append(data)
self.map_info = []
for data in raw_data[3]:
self.map_info.append(data)
|
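The docstring above lists only the first two components, but the loop bodies also read `raw_data[2]` (map images) and `raw_data[3]` (map metadata). A hedged sketch of a pickle matching what the loader expects (all shapes and values are hypothetical):

    import pickle
    import numpy as np

    raw_data = [
        [np.zeros((100, 64))],      # [0] training trajectories
        [np.zeros((40, 64))],       # [1] validation/visualization trajectories
        [np.zeros((200, 200, 3))],  # [2] one map image per dataset
        [(50.0, 50.0, 3.0)],        # [3] (x_max, y_max, scale) per dataset
    ]
    with open('dataset.cpkl', 'wb') as f:
        pickle.dump(raw_data, f)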
def preprocess_sequence(self, seq, isValid, isDiff):
'\n dataset id (0)\n object id (1)\n target pose (2~3)\n neighbor pose (4~63)\n '
seq_len = seq.shape[0]
seq_tpose = np.copy(seq[:, 2:4])
seq_npose = np.copy(seq[:, 4:64]).reshape(seq_len, 30, 2)
dataset_index = int(seq[(0, 0)])
map = self.map[dataset_index]
(x_max, y_max, scale) = self.map_info[dataset_index]
seq_map = []
for i in range(seq_tpose.shape[0]):
x = seq_tpose[(i, 0)]
y = seq_tpose[(i, 1)]
corr_map = map_roi_extract(map, x, y, x_max, y_max, scale, int((self.map_size / 2)))
seq_map.append(corr_map)
" \n map_ = np.copy(np.copy(map_roi[i]))\n map_row_cnt = map_.shape[0] / 2\n map_col_cnt = map_.shape[1] / 2\n\n pose_start_x = seq_tpose[i, 0]\n pose_start_y = seq_tpose[i, 1]\n\n for kappa in range(0, seq_tpose.shape[0]-i):\n\n pose_x = int(3 * (seq_tpose[i+kappa, 0] - pose_start_x) + map_row_cnt)\n pose_y = int(3 * (seq_tpose[i+kappa, 1] - pose_start_y) + map_col_cnt)\n\n pose_x = _min(_max(pose_x, 0), map_.shape[0] - 1)\n pose_y = _min(_max(pose_y, 0), map_.shape[1] - 1)\n\n map_[pose_x, pose_y, 0] = 0\n map_[pose_x, pose_y, 1] = int(255.0 * float(i+kappa) / float(seq_tpose.shape[0]-1))\n map_[pose_x, pose_y, 2] = 255\n\n cv2.imshow('test', map_)\n cv2.waitKey(0)\n "
if isValid:
donothing = 0
elif (self.data_augmentation == 1):
(seq_tpose, seq_npose, seq_map) = random_flip(seq_tpose, seq_npose, seq_map)
elif (self.data_augmentation == 2):
(seq_tpose, seq_npose) = random_rotate(seq_tpose, seq_npose)
" \n for i in range(seq_tpose.shape[0]):\n map_ = np.copy(np.copy(map_roi[i]))\n map_row_cnt = map_.shape[0] / 2\n map_col_cnt = map_.shape[1] / 2\n\n pose_start_x = seq_tpose[i, 0]\n pose_start_y = seq_tpose[i, 1]\n\n for kappa in range(0, seq_tpose.shape[0]-i):\n\n pose_x = int(3 * (seq_tpose[i+kappa, 0] - pose_start_x) + map_row_cnt)\n pose_y = int(3 * (seq_tpose[i+kappa, 1] - pose_start_y) + map_col_cnt)\n\n pose_x = _min(_max(pose_x, 0), map_.shape[0] - 1)\n pose_y = _min(_max(pose_y, 0), map_.shape[1] - 1)\n\n map_[pose_x, pose_y, 0] = 0\n map_[pose_x, pose_y, 1] = int(255.0 * float(i+kappa) / float(seq_tpose.shape[0]-1))\n map_[pose_x, pose_y, 2] = 255\n\n cv2.imshow('test', map_)\n cv2.waitKey(0)\n "
"' \n ego = np.copy(seq_tpose)\n plt.plot(ego[:, 0], ego[:, 1], 'o')\n for i in range(30):\n ngh = np.squeeze(seq_npose[:, i, :]) # seq_len x 2\n ngh_ = ngh[ngh[:, 0]>-1000, :]\n if (len(ngh_) > 1):\n plt.plot(ngh_[:, 0], ngh[:, 1], '+')\n plt.show()\n "
num_grid = int((self.social_range / self.social_grid_size))
seq_sgrid = np.zeros(shape=(seq_len, (num_grid * num_grid)))
for i in range(seq_len):
social_grid = np.zeros(shape=(num_grid, num_grid))
target_pose = seq_tpose[i, :].reshape(1, 2)
neighbors_pose = seq_npose[i, :]
for j in range(30):
if ((neighbors_pose[(j, 0)] == (- 1000)) or (neighbors_pose[(j, 0)] == 1000)):
continue
else:
neighbor_pose = neighbors_pose[j, :].reshape(1, 2)
social_grid = getSocialMatrix(social_grid, target_pose, neighbor_pose, self.social_range, self.social_grid_size)
seq_sgrid[i, :] = social_grid.reshape(1, (num_grid * num_grid))
seq_tpose_cur = np.copy(seq_tpose[1:, :])
seq_tpose_pre = np.copy(seq_tpose[:(- 1), :])
seq_tpose_diff = (seq_tpose_cur - seq_tpose_pre)
if isDiff:
return ((seq_tpose_diff / self.scale_factor), np.copy(seq_sgrid[1:, :]), np.array(seq_map[1:]))
else:
return (seq_tpose_cur, np.copy(seq_sgrid[1:, :]), np.array(seq_map[1:]))
| 4,740,553,938,265,372,000
|
dataset id (0)
object id (1)
target pose (2~3)
neighbor pose (4~63)
|
kitti_utils.py
|
preprocess_sequence
|
d1024choi/trajpred_irl
|
python
|
def preprocess_sequence(self, seq, isValid, isDiff):
'\n dataset id (0)\n object id (1)\n target pose (2~3)\n neighbor pose (4~63)\n '
seq_len = seq.shape[0]
seq_tpose = np.copy(seq[:, 2:4])
seq_npose = np.copy(seq[:, 4:64]).reshape(seq_len, 30, 2)
dataset_index = int(seq[(0, 0)])
map = self.map[dataset_index]
(x_max, y_max, scale) = self.map_info[dataset_index]
seq_map = []
for i in range(seq_tpose.shape[0]):
x = seq_tpose[(i, 0)]
y = seq_tpose[(i, 1)]
corr_map = map_roi_extract(map, x, y, x_max, y_max, scale, int((self.map_size / 2)))
seq_map.append(corr_map)
" \n map_ = np.copy(np.copy(map_roi[i]))\n map_row_cnt = map_.shape[0] / 2\n map_col_cnt = map_.shape[1] / 2\n\n pose_start_x = seq_tpose[i, 0]\n pose_start_y = seq_tpose[i, 1]\n\n for kappa in range(0, seq_tpose.shape[0]-i):\n\n pose_x = int(3 * (seq_tpose[i+kappa, 0] - pose_start_x) + map_row_cnt)\n pose_y = int(3 * (seq_tpose[i+kappa, 1] - pose_start_y) + map_col_cnt)\n\n pose_x = _min(_max(pose_x, 0), map_.shape[0] - 1)\n pose_y = _min(_max(pose_y, 0), map_.shape[1] - 1)\n\n map_[pose_x, pose_y, 0] = 0\n map_[pose_x, pose_y, 1] = int(255.0 * float(i+kappa) / float(seq_tpose.shape[0]-1))\n map_[pose_x, pose_y, 2] = 255\n\n cv2.imshow('test', map_)\n cv2.waitKey(0)\n "
if isValid:
donothing = 0
elif (self.data_augmentation == 1):
(seq_tpose, seq_npose, seq_map) = random_flip(seq_tpose, seq_npose, seq_map)
elif (self.data_augmentation == 2):
(seq_tpose, seq_npose) = random_rotate(seq_tpose, seq_npose)
" \n for i in range(seq_tpose.shape[0]):\n map_ = np.copy(np.copy(map_roi[i]))\n map_row_cnt = map_.shape[0] / 2\n map_col_cnt = map_.shape[1] / 2\n\n pose_start_x = seq_tpose[i, 0]\n pose_start_y = seq_tpose[i, 1]\n\n for kappa in range(0, seq_tpose.shape[0]-i):\n\n pose_x = int(3 * (seq_tpose[i+kappa, 0] - pose_start_x) + map_row_cnt)\n pose_y = int(3 * (seq_tpose[i+kappa, 1] - pose_start_y) + map_col_cnt)\n\n pose_x = _min(_max(pose_x, 0), map_.shape[0] - 1)\n pose_y = _min(_max(pose_y, 0), map_.shape[1] - 1)\n\n map_[pose_x, pose_y, 0] = 0\n map_[pose_x, pose_y, 1] = int(255.0 * float(i+kappa) / float(seq_tpose.shape[0]-1))\n map_[pose_x, pose_y, 2] = 255\n\n cv2.imshow('test', map_)\n cv2.waitKey(0)\n "
"' \n ego = np.copy(seq_tpose)\n plt.plot(ego[:, 0], ego[:, 1], 'o')\n for i in range(30):\n ngh = np.squeeze(seq_npose[:, i, :]) # seq_len x 2\n ngh_ = ngh[ngh[:, 0]>-1000, :]\n if (len(ngh_) > 1):\n plt.plot(ngh_[:, 0], ngh[:, 1], '+')\n plt.show()\n "
num_grid = int((self.social_range / self.social_grid_size))
seq_sgrid = np.zeros(shape=(seq_len, (num_grid * num_grid)))
for i in range(seq_len):
social_grid = np.zeros(shape=(num_grid, num_grid))
target_pose = seq_tpose[i, :].reshape(1, 2)
neighbors_pose = seq_npose[i, :]
for j in range(30):
if ((neighbors_pose[(j, 0)] == (- 1000)) or (neighbors_pose[(j, 0)] == 1000)):
continue
else:
neighbor_pose = neighbors_pose[j, :].reshape(1, 2)
social_grid = getSocialMatrix(social_grid, target_pose, neighbor_pose, self.social_range, self.social_grid_size)
seq_sgrid[i, :] = social_grid.reshape(1, (num_grid * num_grid))
seq_tpose_cur = np.copy(seq_tpose[1:, :])
seq_tpose_pre = np.copy(seq_tpose[:(- 1), :])
seq_tpose_diff = (seq_tpose_cur - seq_tpose_pre)
if isDiff:
return ((seq_tpose_diff / self.scale_factor), np.copy(seq_sgrid[1:, :]), np.array(seq_map[1:]))
else:
return (seq_tpose_cur, np.copy(seq_sgrid[1:, :]), np.array(seq_map[1:]))
|
def next_batch(self):
'\n Read a batch randomly\n :x_batch: <batch size x seq_length x input_dim>\n :y_batch: <batch size x seq_length x input_dim>\n :d_batch: <batch size x seq_length>\n '
x_batch = []
y_batch = []
sg_batch = []
map_batch = []
d_batch = []
for i in range(self.batch_size):
data = self.train_data[self.pointer]
idx = random.randint(0, ((len(data) - self.seq_length) - 2))
seq_all = np.copy(data[idx:((idx + self.seq_length) + 2)])
(seq_all_proc, seq_sgrid, seq_map) = self.preprocess_sequence(seq_all, isValid=False, isDiff=True)
seq_x = np.copy(seq_all_proc[0:self.seq_length])
seq_y = np.copy(seq_all_proc[1:(self.seq_length + 1)])
seq_sgrid_x = np.copy(seq_sgrid[0:self.seq_length, :])
y_batch.append(seq_y)
x_batch.append(seq_x)
sg_batch.append(seq_sgrid_x)
map_batch.append(seq_map[0:self.seq_length])
d_batch.append([self.pointer, idx])
' \n if len(data) is smaller than 50, self.seq_length is 24\n n_batch is 1, therefore, (1.0 / n_batch) is 1\n then the following is the same as\n if random.random() < 1, then go next with prob. 1\n\n if len(data) is greater than 50, self.seq_length is 24\n n_batch is 2, therefore, (1.0 / n_batch) is 0.5\n then the following is the same as\n if random.random() < 0.5, then go next with prob. 0.5\n '
n_batch = int((len(data) / (self.seq_length + 2)))
if (random.random() < (1.0 / float(n_batch))):
self.tick_batch_pointer()
return (x_batch, y_batch, sg_batch, map_batch, d_batch)
| -7,187,001,706,206,327,000
|
Read a batch randomly
:x_batch: <batch size x seq_length x input_dim>
:y_batch: <batch size x seq_length x input_dim>
:d_batch: <batch size x seq_length>
|
kitti_utils.py
|
next_batch
|
d1024choi/trajpred_irl
|
python
|
def next_batch(self):
'\n Read a batch randomly\n :x_batch: <batch size x seq_length x input_dim>\n :y_batch: <batch size x seq_length x input_dim>\n :d_batch: <batch size x seq_length>\n '
x_batch = []
y_batch = []
sg_batch = []
map_batch = []
d_batch = []
for i in range(self.batch_size):
data = self.train_data[self.pointer]
idx = random.randint(0, ((len(data) - self.seq_length) - 2))
seq_all = np.copy(data[idx:((idx + self.seq_length) + 2)])
(seq_all_proc, seq_sgrid, seq_map) = self.preprocess_sequence(seq_all, isValid=False, isDiff=True)
seq_x = np.copy(seq_all_proc[0:self.seq_length])
seq_y = np.copy(seq_all_proc[1:(self.seq_length + 1)])
seq_sgrid_x = np.copy(seq_sgrid[0:self.seq_length, :])
y_batch.append(seq_y)
x_batch.append(seq_x)
sg_batch.append(seq_sgrid_x)
map_batch.append(seq_map[0:self.seq_length])
d_batch.append([self.pointer, idx])
' \n if len(data) is smaller than 50, self.seq_length is 24\n n_batch is 1, therefore, (1.0 / n_batch) is 1\n then the following is the same as\n if random.random() < 1, then go next with prob. 1\n\n if len(data) is greater than 50, self.seq_length is 24\n n_batch is 2, therefore, (1.0 / n_batch) is 0.5\n then the following is the same as\n if random.random() < 0.5, then go next with prob. 0.5\n '
n_batch = int((len(data) / (self.seq_length + 2)))
if (random.random() < (1.0 / float(n_batch))):
self.tick_batch_pointer()
return (x_batch, y_batch, sg_batch, map_batch, d_batch)
|
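The long inline comment inside `next_batch` explains the pointer-advance trick: after sampling a sequence, the trajectory pointer moves on with probability `1.0 / n_batch`, so longer trajectories (larger `n_batch`) are revisited proportionally more often. A standalone check of that expectation:

    import random

    def advance_rate(data_len, seq_length=24, trials=100000):
        # same n_batch formula as next_batch above
        n_batch = int(data_len / (seq_length + 2))
        hits = sum(random.random() < 1.0 / n_batch for _ in range(trials))
        return hits / trials

    print(advance_rate(40))  # ~1.0: n_batch == 1, always advance
    print(advance_rate(80))  # ~0.33: n_batch == 3, advance a third of the time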
def next_batch_valid(self):
'\n Read a batch randomly for validation during training\n :x_batch: <batch size x seq_length x input_dim>\n :y_batch: <batch size x seq_length x input_dim>\n :d_batch: <batch size x seq_length>\n '
x_batch = []
y_batch = []
sg_batch = []
map_batch = []
d_batch = []
counter = 0
while (len(x_batch) < self.batch_size):
data = self.valid_data[self.pointer]
if (self.frame_pointer < ((len(data) - self.seq_length) - 1)):
idx = self.frame_pointer
seq_all = np.copy(data[idx:((idx + self.seq_length) + 2)])
(seq_all_proc, seq_sgrid, seq_map) = self.preprocess_sequence(seq_all, isValid=True, isDiff=True)
seq_x = np.copy(seq_all_proc[0:self.seq_length])
seq_y = np.copy(seq_all_proc[1:(self.seq_length + 1)])
seq_sgrid_x = np.copy(seq_sgrid[0:self.seq_length, :])
y_batch.append(seq_y)
x_batch.append(seq_x)
sg_batch.append(seq_sgrid_x)
map_batch.append(seq_map[0:self.seq_length])
d_batch.append([self.pointer, idx])
self.frame_pointer += int((self.seq_length / 4))
elif (self.pointer >= (len(self.valid_data) - 1)):
x_batch = []
y_batch = []
sg_batch = []
d_batch = []
return (x_batch, y_batch, sg_batch, map_batch, d_batch)
else:
self.pointer += 1
self.frame_pointer = 0
counter += 1
return (x_batch, y_batch, sg_batch, map_batch, d_batch)
| -2,475,856,481,739,356,000
|
Read a batch randomly for validation during training
:x_batch: <batch size x seq_length x input_dim>
:y_batch: <batch size x seq_length x input_dim>
:d_batch: <batch size x seq_length>
|
kitti_utils.py
|
next_batch_valid
|
d1024choi/trajpred_irl
|
python
|
def next_batch_valid(self):
'\n Read a batch randomly for validation during training\n :x_batch: <batch size x seq_length x input_dim>\n :y_batch: <batch size x seq_length x input_dim>\n :d_batch: <batch size x seq_length>\n '
x_batch = []
y_batch = []
sg_batch = []
map_batch = []
d_batch = []
counter = 0
while (len(x_batch) < self.batch_size):
data = self.valid_data[self.pointer]
if (self.frame_pointer < ((len(data) - self.seq_length) - 1)):
idx = self.frame_pointer
seq_all = np.copy(data[idx:((idx + self.seq_length) + 2)])
(seq_all_proc, seq_sgrid, seq_map) = self.preprocess_sequence(seq_all, isValid=True, isDiff=True)
seq_x = np.copy(seq_all_proc[0:self.seq_length])
seq_y = np.copy(seq_all_proc[1:(self.seq_length + 1)])
seq_sgrid_x = np.copy(seq_sgrid[0:self.seq_length, :])
y_batch.append(seq_y)
x_batch.append(seq_x)
sg_batch.append(seq_sgrid_x)
map_batch.append(seq_map[0:self.seq_length])
d_batch.append([self.pointer, idx])
self.frame_pointer += int((self.seq_length / 4))
elif (self.pointer >= (len(self.valid_data) - 1)):
x_batch = []
y_batch = []
sg_batch = []
d_batch = []
return (x_batch, y_batch, sg_batch, map_batch, d_batch)
else:
self.pointer += 1
self.frame_pointer = 0
counter += 1
return (x_batch, y_batch, sg_batch, map_batch, d_batch)
|
def next_sequence_valid(self):
'\n\n dataset id (0)\n object id (1)\n target pose (2~3)\n neighbor pose (4~63)\n\n Read a batch randomly for validation and visualization\n :x_batch: <batch size x seq_length x input_dim>\n :y_batch: <batch size x seq_length x input_dim>\n :d_batch: <batch size x seq_length>\n '
NotEndOfData = True
while NotEndOfData:
if (self.pointer >= len(self.valid_data)):
x = []
grid = []
map = []
x_max = []
y_max = []
scale = []
dataset_index = []
NotEndOfData = False
break
elif (self.frame_pointer >= ((len(self.valid_data[self.pointer]) - self.seq_length) - 2)):
self.frame_pointer = 0
self.pointer += 1
else:
data = self.valid_data[self.pointer]
idx = self.frame_pointer
seq_all = np.copy(data[idx:((idx + self.seq_length) + 1)])
dataset_index = int(seq_all[(0, 0)])
map = self.map[dataset_index]
(x_max, y_max, scale) = self.map_info[dataset_index]
(seq_all_proc, seq_sgrid, seq_map) = self.preprocess_sequence(seq_all, isValid=True, isDiff=False)
x = np.copy(seq_all_proc[0:(self.seq_length + 1)])
grid = np.copy(seq_sgrid[0:(self.seq_length + 1)])
print(('seq_pointer %d, frame_pointer %d' % (self.pointer, self.frame_pointer)))
self.frame_pointer += int((self.seq_length + 1))
break
return (x, grid, map, x_max, y_max, scale, dataset_index, NotEndOfData)
| 7,151,321,459,105,572,000
|
dataset id (0)
object id (1)
target pose (2~3)
neighbor pose (4~63)
Read a batch randomly for validation and visualization
:x_batch: <batch size x seq_length x input_dim>
:y_batch: <batch size x seq_length x input_dim>
:d_batch: <batch size x seq_length>
|
kitti_utils.py
|
next_sequence_valid
|
d1024choi/trajpred_irl
|
python
|
def next_sequence_valid(self):
'\n\n dataset id (0)\n object id (1)\n target pose (2~3)\n neighbor pose (4~63)\n\n Read a batch randomly for validation and visualization\n :x_batch: <batch size x seq_length x input_dim>\n :y_batch: <batch size x seq_length x input_dim>\n :d_batch: <batch size x seq_length>\n '
NotEndOfData = True
while NotEndOfData:
if (self.pointer >= len(self.valid_data)):
x = []
grid = []
map = []
x_max = []
y_max = []
scale = []
dataset_index = []
NotEndOfData = False
break
elif (self.frame_pointer >= ((len(self.valid_data[self.pointer]) - self.seq_length) - 2)):
self.frame_pointer = 0
self.pointer += 1
else:
data = self.valid_data[self.pointer]
idx = self.frame_pointer
seq_all = np.copy(data[idx:((idx + self.seq_length) + 1)])
dataset_index = int(seq_all[(0, 0)])
map = self.map[dataset_index]
(x_max, y_max, scale) = self.map_info[dataset_index]
(seq_all_proc, seq_sgrid, seq_map) = self.preprocess_sequence(seq_all, isValid=True, isDiff=False)
x = np.copy(seq_all_proc[0:(self.seq_length + 1)])
grid = np.copy(seq_sgrid[0:(self.seq_length + 1)])
print(('seq_pointer %d, frame_pointer %d' % (self.pointer, self.frame_pointer)))
self.frame_pointer += int((self.seq_length + 1))
break
return (x, grid, map, x_max, y_max, scale, dataset_index, NotEndOfData)
|
def _load_from_socket(port, auth_secret):
    '\n Load data from a given socket; this is a blocking method, thus it only returns when the socket\n connection has been closed.\n '
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
sock.settimeout(None)
write_int(BARRIER_FUNCTION, sockfile)
sockfile.flush()
res = UTF8Deserializer().loads(sockfile)
sockfile.close()
sock.close()
return res
| 4,420,216,276,343,981,000
|
Load data from a given socket; this is a blocking method, thus it only returns when the socket
connection has been closed.
|
python/pyspark/taskcontext.py
|
_load_from_socket
|
2RedSquares/spark
|
python
|
def _load_from_socket(port, auth_secret):
    '\n Load data from a given socket; this is a blocking method, thus it only returns when the socket\n connection has been closed.\n '
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
sock.settimeout(None)
write_int(BARRIER_FUNCTION, sockfile)
sockfile.flush()
res = UTF8Deserializer().loads(sockfile)
sockfile.close()
sock.close()
return res
|
def __new__(cls):
'Even if users construct TaskContext instead of using get, give them the singleton.'
taskContext = cls._taskContext
if (taskContext is not None):
return taskContext
cls._taskContext = taskContext = object.__new__(cls)
return taskContext
| 3,980,086,144,201,819,600
|
Even if users construct TaskContext instead of using get, give them the singleton.
|
python/pyspark/taskcontext.py
|
__new__
|
2RedSquares/spark
|
python
|
def __new__(cls):
taskContext = cls._taskContext
if (taskContext is not None):
return taskContext
cls._taskContext = taskContext = object.__new__(cls)
return taskContext
|
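The `__new__` override above is the classic cached-singleton pattern: constructing the class again hands back the one stored instance. A minimal standalone sketch of the same pattern (not the PySpark class itself):

    class Singleton(object):
        _instance = None

        def __new__(cls):
            # return the cached instance if one exists, else create and cache it
            if cls._instance is None:
                cls._instance = object.__new__(cls)
            return cls._instance

    a = Singleton()
    b = Singleton()
    print(a is b)  # True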
@classmethod
def _getOrCreate(cls):
'Internal function to get or create global TaskContext.'
if (cls._taskContext is None):
cls._taskContext = TaskContext()
return cls._taskContext
| 6,418,743,054,464,887,000
|
Internal function to get or create global TaskContext.
|
python/pyspark/taskcontext.py
|
_getOrCreate
|
2RedSquares/spark
|
python
|
@classmethod
def _getOrCreate(cls):
if (cls._taskContext is None):
cls._taskContext = TaskContext()
return cls._taskContext
|
@classmethod
def get(cls):
'\n Return the currently active TaskContext. This can be called inside of\n user functions to access contextual information about running tasks.\n\n .. note:: Must be called on the worker, not the driver. Returns None if not initialized.\n '
return cls._taskContext
| 1,419,744,605,024,734,200
|
Return the currently active TaskContext. This can be called inside of
user functions to access contextual information about running tasks.
.. note:: Must be called on the worker, not the driver. Returns None if not initialized.
|
python/pyspark/taskcontext.py
|
get
|
2RedSquares/spark
|
python
|
@classmethod
def get(cls):
'\n Return the currently active TaskContext. This can be called inside of\n user functions to access contextual information about running tasks.\n\n .. note:: Must be called on the worker, not the driver. Returns None if not initialized.\n '
return cls._taskContext
|
def stageId(self):
    'The ID of the stage that this task belongs to.'
return self._stageId
| -8,501,152,381,933,950,000
|
The ID of the stage that this task belongs to.
|
python/pyspark/taskcontext.py
|
stageId
|
2RedSquares/spark
|
python
|
def stageId(self):
return self._stageId
|
def partitionId(self):
'\n The ID of the RDD partition that is computed by this task.\n '
return self._partitionId
| 4,923,525,649,721,193,000
|
The ID of the RDD partition that is computed by this task.
|
python/pyspark/taskcontext.py
|
partitionId
|
2RedSquares/spark
|
python
|
def partitionId(self):
'\n \n '
return self._partitionId
|
def attemptNumber(self):
    '\n How many times this task has been attempted. The first task attempt will be assigned\n attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.\n '
return self._attemptNumber
| 8,904,765,901,230,001,000
|
How many times this task has been attempted. The first task attempt will be assigned
attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
|
python/pyspark/taskcontext.py
|
attemptNumber
|
2RedSquares/spark
|
python
|
def attemptNumber(self):
    '\n How many times this task has been attempted. The first task attempt will be assigned\n attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.\n '
return self._attemptNumber
|
def taskAttemptId(self):
"\n An ID that is unique to this task attempt (within the same SparkContext, no two task\n attempts will share the same attempt ID). This is roughly equivalent to Hadoop's\n TaskAttemptID.\n "
return self._taskAttemptId
| -2,749,768,595,232,958,500
|
An ID that is unique to this task attempt (within the same SparkContext, no two task
attempts will share the same attempt ID). This is roughly equivalent to Hadoop's
TaskAttemptID.
|
python/pyspark/taskcontext.py
|
taskAttemptId
|
2RedSquares/spark
|
python
|
def taskAttemptId(self):
"\n An ID that is unique to this task attempt (within the same SparkContext, no two task\n attempts will share the same attempt ID). This is roughly equivalent to Hadoop's\n TaskAttemptID.\n "
return self._taskAttemptId
|
def getLocalProperty(self, key):
'\n Get a local property set upstream in the driver, or None if it is missing.\n '
return self._localProperties.get(key, None)
| -8,642,961,275,192,264,000
|
Get a local property set upstream in the driver, or None if it is missing.
|
python/pyspark/taskcontext.py
|
getLocalProperty
|
2RedSquares/spark
|
python
|
def getLocalProperty(self, key):
'\n \n '
return self._localProperties.get(key, None)
|
def resources(self):
'\n Resources allocated to the task. The key is the resource name and the value is information\n about the resource.\n '
return self._resources
| -8,342,268,450,500,635,000
|
Resources allocated to the task. The key is the resource name and the value is information
about the resource.
|
python/pyspark/taskcontext.py
|
resources
|
2RedSquares/spark
|
python
|
def resources(self):
'\n Resources allocated to the task. The key is the resource name and the value is information\n about the resource.\n '
return self._resources
|
@classmethod
def _getOrCreate(cls):
'\n Internal function to get or create global BarrierTaskContext. We need to make sure\n BarrierTaskContext is returned from here because it is needed in python worker reuse\n scenario, see SPARK-25921 for more details.\n '
if (not isinstance(cls._taskContext, BarrierTaskContext)):
cls._taskContext = object.__new__(cls)
return cls._taskContext
| 2,762,703,837,966,486,500
|
Internal function to get or create global BarrierTaskContext. We need to make sure
BarrierTaskContext is returned from here because it is needed in python worker reuse
scenario, see SPARK-25921 for more details.
|
python/pyspark/taskcontext.py
|
_getOrCreate
|
2RedSquares/spark
|
python
|
@classmethod
def _getOrCreate(cls):
'\n Internal function to get or create global BarrierTaskContext. We need to make sure\n BarrierTaskContext is returned from here because it is needed in python worker reuse\n scenario, see SPARK-25921 for more details.\n '
if (not isinstance(cls._taskContext, BarrierTaskContext)):
cls._taskContext = object.__new__(cls)
return cls._taskContext
|
@classmethod
def get(cls):
'\n .. note:: Experimental\n\n Return the currently active :class:`BarrierTaskContext`.\n This can be called inside of user functions to access contextual information about\n running tasks.\n\n .. note:: Must be called on the worker, not the driver. Returns None if not initialized.\n '
return cls._taskContext
| 8,051,729,507,975,203,000
|
.. note:: Experimental
Return the currently active :class:`BarrierTaskContext`.
This can be called inside of user functions to access contextual information about
running tasks.
.. note:: Must be called on the worker, not the driver. Returns None if not initialized.
|
python/pyspark/taskcontext.py
|
get
|
2RedSquares/spark
|
python
|
@classmethod
def get(cls):
'\n .. note:: Experimental\n\n Return the currently active :class:`BarrierTaskContext`.\n This can be called inside of user functions to access contextual information about\n running tasks.\n\n .. note:: Must be called on the worker, not the driver. Returns None if not initialized.\n '
return cls._taskContext
|
@classmethod
def _initialize(cls, port, secret):
'\n Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called\n after BarrierTaskContext is initialized.\n '
cls._port = port
cls._secret = secret
| -1,927,426,430,799,265,000
|
Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called
after BarrierTaskContext is initialized.
|
python/pyspark/taskcontext.py
|
_initialize
|
2RedSquares/spark
|
python
|
@classmethod
def _initialize(cls, port, secret):
'\n Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called\n after BarrierTaskContext is initialized.\n '
cls._port = port
cls._secret = secret
|
def barrier(self):
    '\n .. note:: Experimental\n\n Sets a global barrier and waits until all tasks in this stage hit this barrier.\n Similar to `MPI_Barrier` function in MPI, this function blocks until all tasks\n in the same stage have reached this routine.\n\n .. warning:: In a barrier stage, each task must have the same number of `barrier()`\n calls, in all possible code branches.\n Otherwise, you may get the job hanging or a SparkException after timeout.\n\n .. versionadded:: 2.4.0\n '
if ((self._port is None) or (self._secret is None)):
raise Exception(('Not supported to call barrier() before initialize ' + 'BarrierTaskContext.'))
else:
_load_from_socket(self._port, self._secret)
| -5,306,368,122,698,499,000
|
.. note:: Experimental
Sets a global barrier and waits until all tasks in this stage hit this barrier.
Similar to `MPI_Barrier` function in MPI, this function blocks until all tasks
in the same stage have reached this routine.
.. warning:: In a barrier stage, each task must have the same number of `barrier()`
calls, in all possible code branches.
Otherwise, you may get the job hanging or a SparkException after timeout.
.. versionadded:: 2.4.0
|
python/pyspark/taskcontext.py
|
barrier
|
2RedSquares/spark
|
python
|
def barrier(self):
    '\n .. note:: Experimental\n\n Sets a global barrier and waits until all tasks in this stage hit this barrier.\n Similar to `MPI_Barrier` function in MPI, this function blocks until all tasks\n in the same stage have reached this routine.\n\n .. warning:: In a barrier stage, each task must have the same number of `barrier()`\n calls, in all possible code branches.\n Otherwise, you may get the job hanging or a SparkException after timeout.\n\n .. versionadded:: 2.4.0\n '
if ((self._port is None) or (self._secret is None)):
raise Exception(('Not supported to call barrier() before initialize ' + 'BarrierTaskContext.'))
else:
_load_from_socket(self._port, self._secret)
|
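A hedged sketch of how `barrier()` is normally reached from user code under Spark 2.4's barrier execution mode: the function runs inside `rdd.barrier().mapPartitions(...)`, where every task blocks at the barrier until the whole stage arrives. Illustrative only, since it needs a live SparkContext:

    from pyspark import BarrierTaskContext

    def stage_fn(iterator):
        context = BarrierTaskContext.get()
        context.barrier()  # block until every task in the stage reaches this line
        yield sum(1 for _ in iterator)

    # counts = rdd.barrier().mapPartitions(stage_fn).collect()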
def getTaskInfos(self):
'\n .. note:: Experimental\n\n Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,\n ordered by partition ID.\n\n .. versionadded:: 2.4.0\n '
if ((self._port is None) or (self._secret is None)):
raise Exception(('Not supported to call getTaskInfos() before initialize ' + 'BarrierTaskContext.'))
else:
addresses = self._localProperties.get('addresses', '')
return [BarrierTaskInfo(h.strip()) for h in addresses.split(',')]
| 855,620,321,117,693,600
|
.. note:: Experimental
Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
ordered by partition ID.
.. versionadded:: 2.4.0
|
python/pyspark/taskcontext.py
|
getTaskInfos
|
2RedSquares/spark
|
python
|
def getTaskInfos(self):
'\n .. note:: Experimental\n\n Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,\n ordered by partition ID.\n\n .. versionadded:: 2.4.0\n '
if ((self._port is None) or (self._secret is None)):
raise Exception(('Not supported to call getTaskInfos() before initialize ' + 'BarrierTaskContext.'))
else:
        addresses = self._localProperties.get('addresses', '')
return [BarrierTaskInfo(h.strip()) for h in addresses.split(',')]
|
def run_test(self):
'Main test logic'
cli_response = self.nodes[0].cli('-version').send_cli()
assert ('Deepcoin Core RPC client version' in cli_response)
    self.log.info('Compare responses from getwalletinfo RPC and `deepcoin-cli getwalletinfo`')
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info('Compare responses from getblockchaininfo RPC and `deepcoin-cli getblockchaininfo`')
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
(user, password) = get_auth_cookie(self.nodes[0].datadir)
self.log.info('Test -stdinrpcpass option')
assert_equal(0, self.nodes[0].cli(('-rpcuser=%s' % user), '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli(('-rpcuser=%s' % user), '-stdinrpcpass', input='foo').echo)
self.log.info('Test -stdin and -stdinrpcpass')
assert_equal(['foo', 'bar'], self.nodes[0].cli(('-rpcuser=%s' % user), '-stdin', '-stdinrpcpass', input=(password + '\nfoo\nbar')).echo())
assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli(('-rpcuser=%s' % user), '-stdin', '-stdinrpcpass', input='foo').echo)
self.log.info('Test connecting to a non-existing server')
assert_raises_process_error(1, 'Could not connect to the server', self.nodes[0].cli('-rpcport=1').echo)
self.log.info('Test connecting with non-existing RPC cookie file')
assert_raises_process_error(1, 'Could not locate RPC credentials', self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info('Make sure that -getinfo with arguments fails')
assert_raises_process_error(1, '-getinfo takes no arguments', self.nodes[0].cli('-getinfo').help)
self.log.info('Compare responses from `deepcoin-cli -getinfo` and the RPCs data is retrieved from.')
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], (blockchain_info['chain'] == 'test'))
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
| 1,788,795,389,587,930,000
|
Main test logic
|
test/functional/interface_deepcoin_cli.py
|
run_test
|
deepcoindev2/Deepcoin
|
python
|
def run_test(self):
cli_response = self.nodes[0].cli('-version').send_cli()
assert ('Deepcoin Core RPC client version' in cli_response)
    self.log.info('Compare responses from getwalletinfo RPC and `deepcoin-cli getwalletinfo`')
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info('Compare responses from getblockchaininfo RPC and `deepcoin-cli getblockchaininfo`')
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
(user, password) = get_auth_cookie(self.nodes[0].datadir)
self.log.info('Test -stdinrpcpass option')
assert_equal(0, self.nodes[0].cli(('-rpcuser=%s' % user), '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli(('-rpcuser=%s' % user), '-stdinrpcpass', input='foo').echo)
self.log.info('Test -stdin and -stdinrpcpass')
assert_equal(['foo', 'bar'], self.nodes[0].cli(('-rpcuser=%s' % user), '-stdin', '-stdinrpcpass', input=(password + '\nfoo\nbar')).echo())
assert_raises_process_error(1, 'Incorrect rpcuser or rpcpassword', self.nodes[0].cli(('-rpcuser=%s' % user), '-stdin', '-stdinrpcpass', input='foo').echo)
self.log.info('Test connecting to a non-existing server')
assert_raises_process_error(1, 'Could not connect to the server', self.nodes[0].cli('-rpcport=1').echo)
self.log.info('Test connecting with non-existing RPC cookie file')
assert_raises_process_error(1, 'Could not locate RPC credentials', self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info('Make sure that -getinfo with arguments fails')
assert_raises_process_error(1, '-getinfo takes no arguments', self.nodes[0].cli('-getinfo').help)
self.log.info('Compare responses from `deepcoin-cli -getinfo` and the RPCs data is retrieved from.')
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], (blockchain_info['chain'] == 'test'))
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
|
def __init__(self):
'FlatLocales - a model defined in OpenAPI'
self.discriminator = None
| 2,601,218,367,010,354,000
|
FlatLocales - a model defined in OpenAPI
|
flat_api/models/flat_locales.py
|
__init__
|
FlatIO/api-client-python
|
python
|
def __init__(self):
self.discriminator = None
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
| 8,442,519,487,048,767,000
|
Returns the model properties as a dict
|
flat_api/models/flat_locales.py
|
to_dict
|
FlatIO/api-client-python
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
flat_api/models/flat_locales.py
|
to_str
|
FlatIO/api-client-python
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
flat_api/models/flat_locales.py
|
__repr__
|
FlatIO/api-client-python
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, FlatLocales)):
return False
return (self.__dict__ == other.__dict__)
| 8,377,921,118,189,592,000
|
Returns true if both objects are equal
|
flat_api/models/flat_locales.py
|
__eq__
|
FlatIO/api-client-python
|
python
|
def __eq__(self, other):
if (not isinstance(other, FlatLocales)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
flat_api/models/flat_locales.py
|
__ne__
|
FlatIO/api-client-python
|
python
|
def __ne__(self, other):
return (not (self == other))
|
def child(self, index):
'Returns the child HDPrivateKey at a particular index.\n Hardened children are returned for indices >= 0x80000000.\n '
if (index >= 2147483648):
data = (int_to_big_endian(self.private_key.secret, 33) + int_to_big_endian(index, 4))
else:
data = (self.private_key.point.sec() + int_to_big_endian(index, 4))
h = hmac_sha512(self.chain_code, data)
secret = ((big_endian_to_int(h[:32]) + self.private_key.secret) % N)
private_key = PrivateKey(secret=secret)
chain_code = h[32:]
depth = (self.depth + 1)
parent_fingerprint = self.fingerprint()
child_number = index
return HDPrivateKey(private_key=private_key, chain_code=chain_code, depth=depth, parent_fingerprint=parent_fingerprint, child_number=child_number, testnet=self.testnet)
| 7,838,682,160,407,161,000
|
Returns the child HDPrivateKey at a particular index.
Hardened children are returned for indices >= 0x80000000.
|
session6/hd.py
|
child
|
jimmysong/pw-exercises
|
python
|
def child(self, index):
'Returns the child HDPrivateKey at a particular index.\n Hardened children are returned for indices >= 0x80000000.\n '
if (index >= 2147483648):
data = (int_to_big_endian(self.private_key.secret, 33) + int_to_big_endian(index, 4))
else:
data = (self.private_key.point.sec() + int_to_big_endian(index, 4))
h = hmac_sha512(self.chain_code, data)
secret = ((big_endian_to_int(h[:32]) + self.private_key.secret) % N)
private_key = PrivateKey(secret=secret)
chain_code = h[32:]
depth = (self.depth + 1)
parent_fingerprint = self.fingerprint()
child_number = index
return HDPrivateKey(private_key=private_key, chain_code=chain_code, depth=depth, parent_fingerprint=parent_fingerprint, child_number=child_number, testnet=self.testnet)
|
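For orientation: 2147483648 is 0x80000000, the BIP32 hardened boundary. Below it, the HMAC data is built from the compressed public key (SEC); at or above it, from the raw private key, which is why hardened children cannot be derived from an xpub. A minimal usage sketch, assuming hd.py is importable and using HDPrivateKey.from_seed, which from_mnemonic later in this file references but whose body is not shown in this excerpt:

from hd import HDPrivateKey  # session6/hd.py

master = HDPrivateKey.from_seed(b'example seed bytes only')  # placeholder seed
normal = master.child(0)                 # derived from the public point
hardened = master.child(0x80000000)      # derived from the private key bytes
assert hardened.depth == master.depth + 1
assert hardened.parent_fingerprint == master.fingerprint()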
def traverse(self, path):
"Returns the HDPrivateKey at the path indicated.\n Path should be in the form of m/x/y/z where x' means\n hardened"
current = self
components = path.split('/')[1:]
for child in components:
if child.endswith("'"):
index = (int(child[:(- 1)]) + 2147483648)
else:
index = int(child)
current = current.child(index)
return current
| 924,929,705,790,682,600
|
Returns the HDPrivateKey at the path indicated.
Path should be in the form of m/x/y/z where x' means
hardened
|
session6/hd.py
|
traverse
|
jimmysong/pw-exercises
|
python
|
def traverse(self, path):
"Returns the HDPrivateKey at the path indicated.\n Path should be in the form of m/x/y/z where x' means\n hardened"
current = self
components = path.split('/')[1:]
for child in components:
if child.endswith("'"):
index = (int(child[:(- 1)]) + 2147483648)
else:
index = int(child)
current = current.child(index)
return current
|
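traverse folds child over the path components, adding 0x80000000 for any component marked hardened with an apostrophe. The equivalent index arithmetic, as a self-contained check:

# "m/44'/0'/0'/0/5" resolves to the index sequence below.
path = "m/44'/0'/0'/0/5"
indices = []
for component in path.split('/')[1:]:
    if component.endswith("'"):
        indices.append(int(component[:-1]) + 0x80000000)  # hardened
    else:
        indices.append(int(component))                    # normal
assert indices == [0x8000002C, 0x80000000, 0x80000000, 0, 5]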
def _prv(self, version):
'Returns the base58-encoded x/y/z prv.\n Expects a 4-byte version.'
raw = self.raw_serialize(version)
return encode_base58_checksum(raw)
| 2,242,973,465,055,090,200
|
Returns the base58-encoded x/y/z prv.
Expects a 4-byte version.
|
session6/hd.py
|
_prv
|
jimmysong/pw-exercises
|
python
|
def _prv(self, version):
'Returns the base58-encoded x/y/z prv.\n Expects a 4-byte version.'
raw = self.raw_serialize(version)
return encode_base58_checksum(raw)
|
@classmethod
def parse(cls, s):
'Returns a HDPrivateKey from an extended key string'
raw = raw_decode_base58(s)
if (len(raw) != 78):
raise ValueError('Not a proper extended key')
stream = BytesIO(raw)
return cls.raw_parse(stream)
| -3,830,092,010,722,180,600
|
Returns a HDPrivateKey from an extended key string
|
session6/hd.py
|
parse
|
jimmysong/pw-exercises
|
python
|
@classmethod
def parse(cls, s):
raw = raw_decode_base58(s)
if (len(raw) != 78):
raise ValueError('Not a proper extended key')
stream = BytesIO(raw)
return cls.raw_parse(stream)
|
@classmethod
def raw_parse(cls, s):
'Returns a HDPrivateKey from a stream'
version = s.read(4)
if (version in (TESTNET_XPRV, TESTNET_YPRV, TESTNET_ZPRV)):
testnet = True
elif (version in (MAINNET_XPRV, MAINNET_YPRV, MAINNET_ZPRV)):
testnet = False
else:
raise ValueError('not an xprv, yprv or zprv: {}'.format(version))
depth = byte_to_int(s.read(1))
parent_fingerprint = s.read(4)
child_number = big_endian_to_int(s.read(4))
chain_code = s.read(32)
if (byte_to_int(s.read(1)) != 0):
raise ValueError('private key should be preceded by a zero byte')
private_key = PrivateKey(secret=big_endian_to_int(s.read(32)))
return cls(private_key=private_key, chain_code=chain_code, depth=depth, parent_fingerprint=parent_fingerprint, child_number=child_number, testnet=testnet)
| 1,832,675,882,951,446,000
|
Returns a HDPrivateKey from a stream
|
session6/hd.py
|
raw_parse
|
jimmysong/pw-exercises
|
python
|
@classmethod
def raw_parse(cls, s):
version = s.read(4)
if (version in (TESTNET_XPRV, TESTNET_YPRV, TESTNET_ZPRV)):
testnet = True
elif (version in (MAINNET_XPRV, MAINNET_YPRV, MAINNET_ZPRV)):
testnet = False
else:
raise ValueError('not an xprv, yprv or zprv: {}'.format(version))
depth = byte_to_int(s.read(1))
parent_fingerprint = s.read(4)
child_number = big_endian_to_int(s.read(4))
chain_code = s.read(32)
if (byte_to_int(s.read(1)) != 0):
raise ValueError('private key should be preceded by a zero byte')
private_key = PrivateKey(secret=big_endian_to_int(s.read(32)))
return cls(private_key=private_key, chain_code=chain_code, depth=depth, parent_fingerprint=parent_fingerprint, child_number=child_number, testnet=testnet)
|
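The 78 bytes parsed above follow the BIP32 extended-key layout; the offsets below are a hedged restatement of the reads performed by raw_parse, using a zeroed placeholder buffer rather than a real key:

raw = b'\x00' * 78  # placeholder; a real xprv payload after base58 decode
version            = raw[0:4]
depth              = raw[4]
parent_fingerprint = raw[5:9]
child_number       = raw[9:13]
chain_code         = raw[13:45]
key_material       = raw[45:78]  # 0x00 prefix plus 32-byte secret for private keys
assert len(version + bytes([depth]) + parent_fingerprint
           + child_number + chain_code + key_material) == 78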
def _get_address(self, purpose, account=0, external=True, address=0):
"Returns the proper address among purposes 44', 49' and 84'.\n p2pkh for 44', p2sh-p2wpkh for 49' and p2wpkh for 84'."
if (purpose not in ("44'", "49'", "84'")):
raise ValueError('Cannot create an address without a proper purpose: {}'.format(purpose))
if self.testnet:
coin = "1'"
else:
coin = "0'"
if external:
chain = '0'
else:
chain = '1'
path = "m/{}/{}/{}'/{}/{}".format(purpose, coin, account, chain, address)
hd_priv = self.traverse(path)
if (purpose == "44'"):
return hd_priv.address()
elif (purpose == "49'"):
return hd_priv.p2sh_p2wpkh_address()
elif (purpose == "84'"):
return hd_priv.bech32_address()
| 5,953,938,896,022,769,000
|
Returns the proper address among purposes 44', 49' and 84'.
p2pkh for 44', p2sh-p2wpkh for 49' and p2wpkh for 84'.
|
session6/hd.py
|
_get_address
|
jimmysong/pw-exercises
|
python
|
def _get_address(self, purpose, account=0, external=True, address=0):
"Returns the proper address among purposes 44', 49' and 84'.\n p2pkh for 44', p2sh-p2wpkh for 49' and p2wpkh for 84'."
if (purpose not in ("44'", "49'", "84'")):
raise ValueError('Cannot create an address without a proper purpose: {}'.format(purpose))
if self.testnet:
coin = "1'"
else:
coin = "0'"
if external:
chain = '0'
else:
chain = '1'
path = "m/{}/{}/{}'/{}/{}".format(purpose, coin, account, chain, address)
hd_priv = self.traverse(path)
if (purpose == "44'"):
return hd_priv.address()
elif (purpose == "49'"):
return hd_priv.p2sh_p2wpkh_address()
elif (purpose == "84'"):
return hd_priv.bech32_address()
|
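Each purpose selects both a derivation path prefix and an address encoding: 44' yields legacy p2pkh, 49' yields p2sh-wrapped p2wpkh, and 84' yields native-segwit bech32. A hedged usage sketch (the seed is a placeholder, and from_seed's body is not shown in this excerpt):

from hd import HDPrivateKey  # session6/hd.py

root = HDPrivateKey.from_seed(b'example seed bytes only', testnet=True)
legacy  = root._get_address("44'")                  # p2pkh
wrapped = root._get_address("49'", account=1)       # p2sh-p2wpkh
native  = root._get_address("84'", external=False)  # p2wpkh change address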
@classmethod
def from_mnemonic(cls, mnemonic, password=b'', path='m', testnet=False):
'Returns a HDPrivateKey object from the mnemonic.'
words = mnemonic.split()
if (len(words) not in (12, 15, 18, 21, 24)):
raise ValueError('you need 12, 15, 18, 21, or 24 words')
number = 0
for word in words:
index = WORD_LOOKUP[word]
number = ((number << 11) | index)
checksum_bits_length = (len(words) // 3)
checksum = (number & ((1 << checksum_bits_length) - 1))
data_num = (number >> checksum_bits_length)
data = int_to_big_endian(data_num, (checksum_bits_length * 4))
computed_checksum = (sha256(data)[0] >> (8 - checksum_bits_length))
if (checksum != computed_checksum):
raise ValueError('words fail checksum: {}'.format(words))
normalized_words = []
for word in words:
normalized_words.append(WORD_LIST[WORD_LOOKUP[word]])
normalized_mnemonic = ' '.join(normalized_words)
salt = (b'mnemonic' + password)
seed = hmac_sha512_kdf(normalized_mnemonic, salt)
return cls.from_seed(seed, testnet=testnet).traverse(path)
| 5,686,827,912,756,427,000
|
Returns a HDPrivateKey object from the mnemonic.
|
session6/hd.py
|
from_mnemonic
|
jimmysong/pw-exercises
|
python
|
@classmethod
def from_mnemonic(cls, mnemonic, password=b'', path='m', testnet=False):
words = mnemonic.split()
if (len(words) not in (12, 15, 18, 21, 24)):
raise ValueError('you need 12, 15, 18, 21, or 24 words')
number = 0
for word in words:
index = WORD_LOOKUP[word]
number = ((number << 11) | index)
checksum_bits_length = (len(words) // 3)
checksum = (number & ((1 << checksum_bits_length) - 1))
data_num = (number >> checksum_bits_length)
data = int_to_big_endian(data_num, (checksum_bits_length * 4))
computed_checksum = (sha256(data)[0] >> (8 - checksum_bits_length))
if (checksum != computed_checksum):
raise ValueError('words fail checksum: {}'.format(words))
normalized_words = []
for word in words:
normalized_words.append(WORD_LIST[WORD_LOOKUP[word]])
normalized_mnemonic = ' '.join(normalized_words)
salt = (b'mnemonic' + password)
seed = hmac_sha512_kdf(normalized_mnemonic, salt)
return cls.from_seed(seed, testnet=testnet).traverse(path)
|
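The word-count check and checksum arithmetic above encode the BIP39 invariants: 11 bits per word and 32 entropy bits per checksum bit. A self-contained verification of that relationship:

for words in (12, 15, 18, 21, 24):
    total_bits    = words * 11
    checksum_bits = words // 3          # matches checksum_bits_length above
    entropy_bits  = total_bits - checksum_bits
    assert entropy_bits == checksum_bits * 32
    # e.g. 12 words -> 132 bits = 128 entropy bits + 4 checksum bits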
def fingerprint(self):
"Fingerprint is the hash160's first 4 bytes"
return self.hash160()[:4]
| 2,325,837,346,603,048,000
|
Fingerprint is the hash160's first 4 bytes
|
session6/hd.py
|
fingerprint
|
jimmysong/pw-exercises
|
python
|
def fingerprint(self):
return self.hash160()[:4]
|
def child(self, index):
'Returns the child HDPublicKey at a particular index.\n Raises ValueError for indices >= 0x80000000.\n '
if (index >= 2147483648):
raise ValueError('child number should always be less than 2^31')
data = (self.point.sec() + int_to_big_endian(index, 4))
h = hmac_sha512(self.chain_code, data)
point = (self.point + (big_endian_to_int(h[:32]) * G))
chain_code = h[32:]
depth = (self.depth + 1)
parent_fingerprint = self.fingerprint()
child_number = index
return HDPublicKey(point=point, chain_code=chain_code, depth=depth, parent_fingerprint=parent_fingerprint, child_number=child_number, testnet=self.testnet)
| 2,450,639,498,486,831,000
|
Returns the child HDPublicKey at a particular index.
Raises ValueError for indices >= 0x80000000.
|
session6/hd.py
|
child
|
jimmysong/pw-exercises
|
python
|
def child(self, index):
'Returns the child HDPublicKey at a particular index.\n Raises ValueError for indices >= 0x80000000.\n '
if (index >= 2147483648):
raise ValueError('child number should always be less than 2^31')
data = (self.point.sec() + int_to_big_endian(index, 4))
h = hmac_sha512(self.chain_code, data)
point = (self.point + (big_endian_to_int(h[:32]) * G))
chain_code = h[32:]
depth = (self.depth + 1)
parent_fingerprint = self.fingerprint()
child_number = index
return HDPublicKey(point=point, chain_code=chain_code, depth=depth, parent_fingerprint=parent_fingerprint, child_number=child_number, testnet=self.testnet)
|
def traverse(self, path):
'Returns the HDPublicKey at the path indicated.\n Path should be in the form of m/x/y/z.'
current = self
components = path.split('/')[1:]
for child in components:
if (child[(- 1):] == "'"):
raise ValueError('HDPublicKey cannot get hardened child')
current = current.child(int(child))
return current
| -492,497,757,683,349,200
|
Returns the HDPublicKey at the path indicated.
Path should be in the form of m/x/y/z.
|
session6/hd.py
|
traverse
|
jimmysong/pw-exercises
|
python
|
def traverse(self, path):
'Returns the HDPublicKey at the path indicated.\n Path should be in the form of m/x/y/z.'
current = self
components = path.split('/')[1:]
for child in components:
if (child[(- 1):] == "'"):
raise ValueError('HDPublicKey cannot get hardened child')
current = current.child(int(child))
return current
|
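Since hardened indices require the parent private key, the public-key traverse refuses them outright. A small hedged helper exercising both branches (any HDPublicKey instance works, e.g. one returned by HDPublicKey.parse below):

def expect_hardened_rejection(pub):
    try:
        pub.traverse("m/0'/1")
        raise AssertionError('hardened path should have been rejected')
    except ValueError:
        pass                      # 'HDPublicKey cannot get hardened child'
    return pub.traverse('m/0/1')  # unhardened derivation still works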
def _pub(self, version):
'Returns the base58-encoded x/y/z pub.\n Expects a 4-byte version.'
raw = self._serialize(version)
return encode_base58_checksum(raw)
| -8,777,945,254,099,310,000
|
Returns the base58-encoded x/y/z pub.
Expects a 4-byte version.
|
session6/hd.py
|
_pub
|
jimmysong/pw-exercises
|
python
|
def _pub(self, version):
'Returns the base58-encoded x/y/z pub.\n Expects a 4-byte version.'
raw = self._serialize(version)
return encode_base58_checksum(raw)
|
@classmethod
def parse(cls, s):
'Returns a HDPublicKey from an extended key string'
raw = raw_decode_base58(s)
if (len(raw) != 78):
raise ValueError('Not a proper extended key')
stream = BytesIO(raw)
return cls.raw_parse(stream)
| -348,898,283,253,583,100
|
Returns a HDPublicKey from an extended key string
|
session6/hd.py
|
parse
|
jimmysong/pw-exercises
|
python
|
@classmethod
def parse(cls, s):
raw = raw_decode_base58(s)
if (len(raw) != 78):
raise ValueError('Not a proper extended key')
stream = BytesIO(raw)
return cls.raw_parse(stream)
|
@classmethod
def raw_parse(cls, s):
'Returns a HDPublicKey from a stream'
version = s.read(4)
if (version in (TESTNET_XPUB, TESTNET_YPUB, TESTNET_ZPUB)):
testnet = True
elif (version in (MAINNET_XPUB, MAINNET_YPUB, MAINNET_ZPUB)):
testnet = False
else:
raise ValueError('not an xpub, ypub or zpub: {} {}'.format(s, version))
depth = byte_to_int(s.read(1))
parent_fingerprint = s.read(4)
child_number = big_endian_to_int(s.read(4))
chain_code = s.read(32)
point = S256Point.parse(s.read(33))
return cls(point=point, chain_code=chain_code, depth=depth, parent_fingerprint=parent_fingerprint, child_number=child_number, testnet=testnet)
| 7,748,026,601,330,876,000
|
Returns a HDPublicKey from a stream
|
session6/hd.py
|
raw_parse
|
jimmysong/pw-exercises
|
python
|
@classmethod
def raw_parse(cls, s):
version = s.read(4)
if (version in (TESTNET_XPUB, TESTNET_YPUB, TESTNET_ZPUB)):
testnet = True
elif (version in (MAINNET_XPUB, MAINNET_YPUB, MAINNET_ZPUB)):
testnet = False
else:
raise ValueError('not an xpub, ypub or zpub: {} {}'.format(s, version))
depth = byte_to_int(s.read(1))
parent_fingerprint = s.read(4)
child_number = big_endian_to_int(s.read(4))
chain_code = s.read(32)
point = S256Point.parse(s.read(33))
return cls(point=point, chain_code=chain_code, depth=depth, parent_fingerprint=parent_fingerprint, child_number=child_number, testnet=testnet)
|
@cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type)
| 1,702,168,743,392,494,600
|
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
|
cryptoapis/model/list_assets_details_e400.py
|
additional_properties_type
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
python
|
@cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type)
|
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'details': ([BannedIpAddressDetails],), 'code': (str,), 'message': (str,)}
| -5,576,899,373,819,436,000
|
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
|
cryptoapis/model/list_assets_details_e400.py
|
openapi_types
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
python
|
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'details': ([BannedIpAddressDetails],), 'code': (str,), 'message': (str,)}
|
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
'ListAssetsDetailsE400 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n details ([BannedIpAddressDetails]): [optional] # noqa: E501\n code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501\n message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
composed_info = validate_get_composed_info(constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for (var_name, var_value) in kwargs.items():
if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
continue
setattr(self, var_name, var_value)
return self
| 2,020,670,956,389,146,600
|
ListAssetsDetailsE400 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
details ([BannedIpAddressDetails]): [optional] # noqa: E501
code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501
message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501
|
cryptoapis/model/list_assets_details_e400.py
|
_from_openapi_data
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
python
|
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
'ListAssetsDetailsE400 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n details ([BannedIpAddressDetails]): [optional] # noqa: E501\n code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501\n message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
composed_info = validate_get_composed_info(constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for (var_name, var_value) in kwargs.items():
if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
continue
setattr(self, var_name, var_value)
return self
|
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'ListAssetsDetailsE400 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n details ([BannedIpAddressDetails]): [optional] # noqa: E501\n code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501\n message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
composed_info = validate_get_composed_info(constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for (var_name, var_value) in kwargs.items():
if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
continue
setattr(self, var_name, var_value)
if (var_name in self.read_only_vars):
raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
| -423,784,295,900,482,900
|
ListAssetsDetailsE400 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
details ([BannedIpAddressDetails]): [optional] # noqa: E501
code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501
message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501
|
cryptoapis/model/list_assets_details_e400.py
|
__init__
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
python
|
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'ListAssetsDetailsE400 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n details ([BannedIpAddressDetails]): [optional] # noqa: E501\n code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501\n message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
composed_info = validate_get_composed_info(constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for (var_name, var_value) in kwargs.items():
if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
continue
setattr(self, var_name, var_value)
if (var_name in self.read_only_vars):
raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
|
def __init__(self):
'\n Creates the himesis graph representing the AToM3 model HMM10_then1_IsolatedLHS.\n '
self.is_compiled = True
super(HMM10_then1_IsolatedLHS, self).__init__(name='HMM10_then1_IsolatedLHS', num_nodes=0, edges=[])
self.add_edges([])
self['mm__'] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self['MT_constraint__'] = "#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode('n').\n# To access attribute x of node n, use: PreNode('n')['x'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n"
self['name'] = ''
self['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'MM10_then1')
self['equations'] = []
| 8,705,714,719,552,520,000
|
Creates the himesis graph representing the AToM3 model HMM10_then1_IsolatedLHS.
|
UMLRT2Kiltera_MM/Properties/from_thesis/HMM10_then1_IsolatedLHS.py
|
__init__
|
levilucio/SyVOLT
|
python
|
def __init__(self):
'\n \n '
self.is_compiled = True
super(HMM10_then1_IsolatedLHS, self).__init__(name='HMM10_then1_IsolatedLHS', num_nodes=0, edges=[])
self.add_edges([])
self['mm__'] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self['MT_constraint__'] = "#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode('n').\n# To access attribute x of node n, use: PreNode('n')['x'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n"
self['name'] = ''
self['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'MM10_then1')
self['equations'] = []
|
def constraint(self, PreNode, graph):
'\n Executable constraint code.\n @param PreNode: Function taking an integer as parameter\n and returns the node corresponding to that label.\n '
return True
| -9,135,366,208,570,063,000
|
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
|
UMLRT2Kiltera_MM/Properties/from_thesis/HMM10_then1_IsolatedLHS.py
|
constraint
|
levilucio/SyVOLT
|
python
|
def constraint(self, PreNode, graph):
'\n Executable constraint code.\n @param PreNode: Function taking an integer as parameter\n and returns the node corresponding to that label.\n '
return True
|
def read_symbols(executable, imports=True):
'\n Parse an ELF executable and return a list of (symbol,version) tuples\n for dynamic, imported symbols.\n '
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError(('Could not read symbols for %s: %s' % (executable, stderr.strip())))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if ((len(line) > 7) and re.match(b'[0-9]+:$', line[0])):
(sym, _, version) = line[7].partition(b'@')
is_import = (line[6] == b'UND')
if version.startswith(b'@'):
version = version[1:]
if (is_import == imports):
syms.append((sym, version))
return syms
| -1,495,590,509,076,206,600
|
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
|
contrib/devtools/symbol-check.py
|
read_symbols
|
bitcoinemxmx/GCX
|
python
|
def read_symbols(executable, imports=True):
'\n Parse an ELF executable and return a list of (symbol,version) tuples\n for dynamic, imported symbols.\n '
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError(('Could not read symbols for %s: %s' % (executable, stderr.strip())))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if ((len(line) > 7) and re.match(b'[0-9]+:$', line[0])):
(sym, _, version) = line[7].partition(b'@')
is_import = (line[6] == b'UND')
if version.startswith(b'@'):
version = version[1:]
if (is_import == imports):
syms.append((sym, version))
return syms
|
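A hedged usage sketch of read_symbols; the binary path is only an example, and the output depends on the host's readelf and libc:

# Example only: paths and results vary by system; READELF_CMD must
# point to a working readelf binary.
imported = read_symbols('/bin/ls', imports=True)
for sym, version in imported:
    if version.startswith(b'GLIBC_'):
        print(sym.decode(), version.decode())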
def build(classifier, X, y=None):
'\n Inner build function that builds a single model.\n '
if isinstance(classifier, type):
classifier = classifier()
model = Pipeline([('preprocessor', NLTKPreprocessor()), ('vectorizer', TfidfVectorizer(tokenizer=identity, preprocessor=None, lowercase=False)), ('classifier', classifier)])
model.fit(X, y)
return model
| 4,710,342,378,621,694,000
|
Inner build function that builds a single model.
|
analyzer/build.py
|
build
|
shobhitagarwal1612/Emotion-Analysis
|
python
|
def build(classifier, X, y=None):
'\n \n '
if isinstance(classifier, type):
classifier = classifier()
model = Pipeline([('preprocessor', NLTKPreprocessor()), ('vectorizer', TfidfVectorizer(tokenizer=identity, preprocessor=None, lowercase=False)), ('classifier', classifier)])
model.fit(X, y)
return model
|
def test_toolchains(self):
'Try each toolchain'
for toolchain in fpgaperf.toolchains.keys():
(device, package) = def_devpack(toolchain)
fpgaperf.run(family='ice40', device=device, package=package, toolchain=toolchain, project=fpgaperf.get_project('oneblink'), verbose=self.verbose)
| 8,048,555,090,105,903,000
|
Try each toolchain
|
test/test_all.py
|
test_toolchains
|
arn4ud/fpga-tool-perf
|
python
|
def test_toolchains(self):
for toolchain in fpgaperf.toolchains.keys():
(device, package) = def_devpack(toolchain)
fpgaperf.run(family='ice40', device=device, package=package, toolchain=toolchain, project=fpgaperf.get_project('oneblink'), verbose=self.verbose)
|
def test_pcf(self):
'Try each toolchain with a pcf'
for toolchain in fpgaperf.toolchains.keys():
(device, package) = def_devpack(toolchain)
if ('radiant' in toolchain):
pcf = (fpgaperf.root_dir + '/project/FIXME.pcf')
else:
pcf = (fpgaperf.root_dir + '/project/oneblink_lp8k-cm81.pcf')
fpgaperf.run(family='ice40', device=device, package=package, toolchain=toolchain, project=fpgaperf.get_project('oneblink'), pcf=pcf, verbose=self.verbose)
| -8,995,371,934,689,438,000
|
Try each toolchain with a pcf
|
test/test_all.py
|
test_pcf
|
arn4ud/fpga-tool-perf
|
python
|
def test_pcf(self):
for toolchain in fpgaperf.toolchains.keys():
(device, package) = def_devpack(toolchain)
if ('radiant' in toolchain):
pcf = (fpgaperf.root_dir + '/project/FIXME.pcf')
else:
pcf = (fpgaperf.root_dir + '/project/oneblink_lp8k-cm81.pcf')
fpgaperf.run(family='ice40', device=device, package=package, toolchain=toolchain, project=fpgaperf.get_project('oneblink'), pcf=pcf, verbose=self.verbose)
|
def test_seed(self):
'Try seeding, where possible'
random.seed(1234)
for toolchain in fpgaperf.get_seedable():
seed = random.randint(1, 2147483647)
(device, package) = def_devpack(toolchain)
fpgaperf.run(family='ice40', device=device, package=package, toolchain=toolchain, project=fpgaperf.get_project('oneblink'), seed=seed, verbose=self.verbose)
| 6,710,430,323,428,870,000
|
Try seeding, where possible
|
test/test_all.py
|
test_seed
|
arn4ud/fpga-tool-perf
|
python
|
def test_seed(self):
random.seed(1234)
for toolchain in fpgaperf.get_seedable():
seed = random.randint(1, 2147483647)
(device, package) = def_devpack(toolchain)
fpgaperf.run(family='ice40', device=device, package=package, toolchain=toolchain, project=fpgaperf.get_project('oneblink'), seed=seed, verbose=self.verbose)
|
def __init__(self, config, wsas_params, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology, options=None, parameters=None):
'Wrapper for freesasa\n\n config: str\n Path to configuration file containing residue composition\n and atomic parameters - freesasa format.\n options: dict, optional\n Options to change how PDBs are parsed by freesasa.\n parameters: dict, optional\n Parameters to alter how freesasa computes surface area.\n\n '
freesasa.setVerbosity(1)
config = self._update_sasa_config(config, wsas_params, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology)
self.classifier = freesasa.Classifier(bytes(str(config), 'utf-8'))
self.options = (options or _DEFAULT_OPTIONS)
self.parameters = (parameters or _DEFAULT_PARAMETERS)
| 4,610,078,083,853,016,600
|
Wrapper for freesasa
config: str
Path to configuration file containing residue composition
and atomic parameters - freesasa format.
options: dict, optional
Options to change how PDBs are parsed by freesasa.
parameters: dict, optional
Parameters to alter how freesasa computes surface area.
|
bac/analyse/wsas/freesasa_utils.py
|
__init__
|
UCL-CCS/BAC2
|
python
|
def __init__(self, config, wsas_params, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology, options=None, parameters=None):
'Wrapper for freesasa\n\n config: str\n Path to configuration file containing residue composition\n and atomic parameters - freesasa format.\n options: dict, optional\n Options to change how PDBs are parsed by freesasa.\n parameters: dict, optional\n Parameters to alter how freesasa computes surface area.\n\n '
freesasa.setVerbosity(1)
config = self._update_sasa_config(config, wsas_params, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology)
self.classifier = freesasa.Classifier(bytes(str(config), 'utf-8'))
self.options = (options or _DEFAULT_OPTIONS)
self.parameters = (parameters or _DEFAULT_PARAMETERS)
|
def run(self, pdb):
'Run freesasa on provided PDB file\n\n Parameters\n ----------\n\n pdb: str\n Path to input PDB file\n\n Returns\n -------\n list\n SASA values for each atom of every model in the input PDB.\n\n '
structure_array = freesasa.structureArray(bytes(pdb, 'utf-8'), options=self.options, classifier=self.classifier)
results = []
for s in structure_array:
print('Computing SASA for each model/frame')
result = freesasa.calc(s)
atom_areas = [result.atomArea(ndx) for ndx in range(s.nAtoms())]
results.append(atom_areas)
return results
| 2,183,696,454,969,197,300
|
Run freesasa on provided PDB file
Parameters
----------
pdb: str
Path to input PDB file
Returns
-------
list
SASA values for each atom of every model in the input PDB.
|
bac/analyse/wsas/freesasa_utils.py
|
run
|
UCL-CCS/BAC2
|
python
|
def run(self, pdb):
'Run freesasa on provided PDB file\n\n Parameters\n ----------\n\n pdb: str\n Path to input PDB file\n\n Returns\n -------\n list\n SASA values for each atom of every model in the input PDB.\n\n '
structure_array = freesasa.structureArray(bytes(pdb, 'utf-8'), options=self.options, classifier=self.classifier)
results = []
for s in structure_array:
print('Computing SASA for each model/frame')
result = freesasa.calc(s)
atom_areas = [result.atomArea(ndx) for ndx in range(s.nAtoms())]
results.append(atom_areas)
return results
|
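A hedged usage sketch of this wrapper. FreesasaRunner is an assumed name for the enclosing class (the excerpt shows only its methods), and the file paths and atom_params dict are placeholders:

# FreesasaRunner is an assumed class name; only __init__ and run
# are shown in this file excerpt.
wrapper = FreesasaRunner(config='freesasa.config',
                         wsas_params=atom_params,   # hypothetical dict
                         tmp_dir='/tmp/wsas',
                         nonstandard_residue_files=[],
                         nonstandard_residue=None,
                         ligand_topology=None)
per_model = wrapper.run('complex.pdb')
print('model 0 total SASA:', sum(per_model[0]))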
def _update_sasa_config(self, config, parameters, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology):
'\n Add non-standard residues (including the ligand if a topology is\n provided for it) to the freesasa config file.\n\n Parameters\n ----------\n\n Notes\n -----\n The edited config file is saved in self.tmp_dir and\n self.freesasa_config_file is updated to reflect this.\n\n Returns\n -------\n\n '
files_to_add = nonstandard_residue_files
if ligand_topology:
files_to_add.append(ligand_topology)
residues_to_add = {}
for filename in files_to_add:
(residues, gentop) = extract_residue(filename)
residues_to_add.update(residues)
if nonstandard_residue:
residues_to_add.update(nonstandard_residue)
if residues_to_add:
sasa_config = os.path.join(tmp_dir, 'system_sasa.config')
self._add_residues_freesasa_config_file(residues_to_add, sasa_config, parameters, orig_filename=config)
return sasa_config
return config
| -3,815,972,230,154,587,000
|
Add non-standard residues (including the ligand if a topology is
provided for it) to the freesasa config file.
Parameters
----------
Notes
-----
The edited config file is saved in self.tmp_dir and
self.freesasa_config_file is updated to reflect this.
Returns
-------
|
bac/analyse/wsas/freesasa_utils.py
|
_update_sasa_config
|
UCL-CCS/BAC2
|
python
|
def _update_sasa_config(self, config, parameters, tmp_dir, nonstandard_residue_files, nonstandard_residue, ligand_topology):
'\n Add non-standard residues (including the ligand if a topology is\n provided for it) to the freesasa config file.\n\n Parameters\n ----------\n\n Notes\n -----\n The edited config file is saved in self.tmp_dir and\n self.freesasa_config_file is updated to reflect this.\n\n Returns\n -------\n\n '
files_to_add = nonstandard_residue_files
if ligand_topology:
files_to_add.append(ligand_topology)
residues_to_add = {}
for filename in files_to_add:
(residues, gentop) = extract_residue(filename)
residues_to_add.update(residues)
if nonstandard_residue:
residues_to_add.update(nonstandard_residue)
if residues_to_add:
sasa_config = os.path.join(tmp_dir, 'system_sasa.config')
self._add_residues_freesasa_config_file(residues_to_add, sasa_config, parameters, orig_filename=config)
return sasa_config
return config
|
@staticmethod
def _create_freesasa_section_text(new_residues, sasa_atom_params):
'\n Create text to add to freesasa configuration file to incorporate new residue.\n\n Parameters\n ----------\n new_residues : dict\n Non-standard residues to add to the freesasa config file.\n keys = residue names, values = atom name to type mapping (dict).\n sasa_atom_params: dict\n Maps atom type to properties needed by freesasa (radius and polarity).\n\n Returns\n -------\n atom_type_section : str\n Text to be added to freesasa config file atom type section.\n residue_section : str\n Text to be added to freesasa config file residue section.\n\n '
atom_types = []
residue_section = ''
for (res_name, atom_to_type) in new_residues.items():
residue_section += '\n'
for (atom_name, atom_type) in atom_to_type.items():
residue_line = '{:s} {:s} {:s}\n'.format(res_name, atom_name, atom_type)
atom_types.append(atom_type)
residue_section += residue_line
atom_type_section = ''
for atom_type in set(atom_types):
if (atom_type in sasa_atom_params):
atom_line = '{:s} {:.2f} {:s}\n'.format(atom_type, sasa_atom_params[atom_type]['radius'], sasa_atom_params[atom_type]['polarity'])
else:
raise Exception('This atom type was not found to have preset radius and polarity')
atom_type_section += atom_line
return (atom_type_section, residue_section)
| 8,965,076,196,579,469,000
|
Create text to add to freesasa configuration file to incorporate new residue.
Parameters
----------
new_residues : dict
Non-standard residues to add to the freesasa config file.
keys = residue names, values = atom name to type mapping (dict).
sasa_atom_params: dict
Maps atom type to properties needed by freesasa (radius and polarity).
Returns
-------
atom_type_section : str
Text to be added to freesasa config file atom type section.
residue_section : str
Text to be added to freesasa config file residue section.
|
bac/analyse/wsas/freesasa_utils.py
|
_create_freesasa_section_text
|
UCL-CCS/BAC2
|
python
|
@staticmethod
def _create_freesasa_section_text(new_residues, sasa_atom_params):
'\n Create text to add to freesasa configuration file to incorporate new residue.\n\n Parameters\n ----------\n new_residues : dict\n Non-standard residues to add to the freesasa config file.\n keys = residue names, values = atom name to type mapping (dict).\n sasa_atom_params: dict\n Maps atom type to properties needed by freesasa (radius and polarity).\n\n Returns\n -------\n atom_type_section : str\n Text to be added to freesasa config file atom type section.\n residue_section : str\n Text to be added to freesasa config file residue section.\n\n '
atom_types = []
residue_section = ''
for (res_name, atom_to_type) in new_residues.items():
residue_section += '\n'
for (atom_name, atom_type) in atom_to_type.items():
residue_line = '{:s} {:s} {:s}\n'.format(res_name, atom_name, atom_type)
atom_types.append(atom_type)
residue_section += residue_line
atom_type_section = ''
for atom_type in set(atom_types):
if (atom_type in sasa_atom_params):
atom_line = '{:s} {:.2f} {:s}\n'.format(atom_type, sasa_atom_params[atom_type]['radius'], sasa_atom_params[atom_type]['polarity'])
else:
raise Exception('This atom type was not found to have preset radius and polarity')
atom_type_section += atom_line
return (atom_type_section, residue_section)
|
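A worked example of the two strings this helper produces, derived directly from the format strings above; the residue and atom type names are hypothetical:

new_residues = {'LIG': {'C1': 'c3'}}
atom_params = {'c3': {'radius': 1.70, 'polarity': 'apolar'}}
# _create_freesasa_section_text(new_residues, atom_params) would yield:
#   atom_type_section == 'c3 1.70 apolar\n'
#   residue_section   == '\nLIG C1 c3\n'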
def _add_residues_freesasa_config_file(self, new_residues, new_filename, atom_params, orig_filename):
'\n Create a new freesasa config file that adds the specified residues to the\n content of an existing copy.\n\n Parameters\n ----------\n new_residues : dict\n Non-standard residues to add to the freesasa config file.\n keys = residue names, values = atom name to type mapping (dict).\n new_filename: str\n Filename to be used for the updated freesasa config file.\n atom_params: dict\n Radius and polarity information for each atom type.\n orig_filename: str\n Filename for the original freesasa config file.\n\n '
(new_atom_types, new_residues) = self._create_freesasa_section_text(new_residues, atom_params)
with open(new_filename, 'w') as out_file, open(orig_filename) as input_config:
[out_file.write(((l + new_atom_types) if l.startswith('# extra') else l)) for l in input_config]
out_file.write(new_residues)
| 3,335,320,457,434,036,700
|
Create a new freesasa config file that adds the specified residues to the
content of an existing copy.
Parameters
----------
new_residues : dict
Non-standard residues to add to the freesasa config file.
keys = residue names, values = atom name to type mapping (dict).
new_filename: str
Filename to be used for the updated freesasa config file.
atom_params: dict
Radius and polarity information for each atom type.
orig_filename: str
Filename for the original freesasa config file.
|
bac/analyse/wsas/freesasa_utils.py
|
_add_residues_freesasa_config_file
|
UCL-CCS/BAC2
|
python
|
def _add_residues_freesasa_config_file(self, new_residues, new_filename, atom_params, orig_filename):
'\n Create a new freesasa config file that adds the specified residues to the\n content of an existing copy.\n\n Parameters\n ----------\n new_residues : dict\n Non-standard residues to add to the freesasa config file.\n keys = residue names, values = atom name to type mapping (dict).\n new_filename: str\n Filename to be used for the updated freesasa config file.\n atom_params: dict\n Radius and polarity information for each atom type.\n orig_filename: str\n Filename for the original freesasa config file.\n\n '
(new_atom_types, new_residues) = self._create_freesasa_section_text(new_residues, atom_params)
with open(new_filename, 'w') as out_file, open(orig_filename) as input_config:
[out_file.write(((l + new_atom_types) if l.startswith('# extra') else l)) for l in input_config]
out_file.write(new_residues)
|
def splitlines_parser(data):
'A test parser that returns the input data, split by line.'
return data.splitlines()
| 8,821,445,958,649,442,000
|
A test parser that returns the input data, split by line.
|
tests/integrations/subprocess/test_Subprocess__parse_output.py
|
splitlines_parser
|
pybee/briefcase
|
python
|
def splitlines_parser(data):
return data.splitlines()
|
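Taken together, the tests below pin down parse_output's contract: forward the command to check_output with text=True unless the caller sets text or universal_newlines, feed the captured output to the parser, and convert ParseError into CommandOutputParseError while echoing diagnostics. A sketch reconstructed from these tests, not the actual briefcase implementation:

# Reconstructed from the tests; ParseError and CommandOutputParseError
# are briefcase exception types.
def parse_output(self, output_parser, args, **kwargs):
    if 'text' not in kwargs and 'universal_newlines' not in kwargs:
        kwargs['text'] = True        # default, overridable per the tests
    output = self._subprocess.check_output(args, **kwargs)
    try:
        return output_parser(output)
    except ParseError as e:
        # The real method also prints the command and its output
        # before raising, per test_call_with_parser_error.
        raise CommandOutputParseError(str(e))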
def second_line_parser(data):
'A test parser that returns the second line of input.'
try:
return data.splitlines()[1]
except IndexError:
raise ParseError('Input does not contain 2 lines')
| 5,938,198,887,935,977,000
|
A test parser that returns the second line of input.
|
tests/integrations/subprocess/test_Subprocess__parse_output.py
|
second_line_parser
|
pybee/briefcase
|
python
|
def second_line_parser(data):
try:
return data.splitlines()[1]
except IndexError:
raise ParseError('Input does not contain 2 lines')
|
def third_line_parser(data):
'A test parser that returns the third line of input.'
try:
return data.splitlines()[2]
except IndexError:
raise ParseError('Input does not contain 3 lines')
| 1,768,642,836,130,958,300
|
A test parser that returns the third line of input.
|
tests/integrations/subprocess/test_Subprocess__parse_output.py
|
third_line_parser
|
pybee/briefcase
|
python
|
def third_line_parser(data):
try:
return data.splitlines()[2]
except IndexError:
raise ParseError('Input does not contain 3 lines')
|
def test_call(mock_sub, capsys):
'A simple call to check_output will be invoked.'
output = mock_sub.parse_output(splitlines_parser, ['hello', 'world'])
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], text=True)
assert (capsys.readouterr().out == '')
assert (output == ['some output line 1', 'more output line 2'])
| -9,145,021,900,062,871,000
|
A simple call to check_output will be invoked.
|
tests/integrations/subprocess/test_Subprocess__parse_output.py
|
test_call
|
pybee/briefcase
|
python
|
def test_call(mock_sub, capsys):
output = mock_sub.parse_output(splitlines_parser, ['hello', 'world'])
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], text=True)
assert (capsys.readouterr().out == '')
assert (output == ['some output line 1', 'more output line 2'])
|
def test_call_with_arg(mock_sub, capsys):
'Any extra keyword arguments are passed through as-is to check_output.'
output = mock_sub.parse_output(splitlines_parser, ['hello', 'world'], extra_arg='asdf')
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], extra_arg='asdf', text=True)
assert (capsys.readouterr().out == '')
assert (output == ['some output line 1', 'more output line 2'])
| -6,034,261,138,534,564,000
|
Any extra keyword arguments are passed through as-is to check_output.
|
tests/integrations/subprocess/test_Subprocess__parse_output.py
|
test_call_with_arg
|
pybee/briefcase
|
python
|
def test_call_with_arg(mock_sub, capsys):
output = mock_sub.parse_output(splitlines_parser, ['hello', 'world'], extra_arg='asdf')
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], extra_arg='asdf', text=True)
assert (capsys.readouterr().out == '')
assert (output == ['some output line 1', 'more output line 2'])
|
def test_call_with_parser_success(mock_sub, capsys):
"Parser returns expected portion of check_output's output."
output = mock_sub.parse_output(second_line_parser, ['hello', 'world'])
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], text=True)
assert (output == 'more output line 2')
| -8,240,593,146,039,259,000
|
Parser returns expected portion of check_output's output.
|
tests/integrations/subprocess/test_Subprocess__parse_output.py
|
test_call_with_parser_success
|
pybee/briefcase
|
python
|
def test_call_with_parser_success(mock_sub, capsys):
output = mock_sub.parse_output(second_line_parser, ['hello', 'world'])
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], text=True)
assert (output == 'more output line 2')
|
def test_call_with_parser_error(mock_sub, capsys):
'Parser errors on output from check_output.'
with pytest.raises(CommandOutputParseError, match='Unable to parse command output: Input does not contain 3 lines'):
mock_sub.parse_output(third_line_parser, ['hello', 'world'])
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], text=True)
expected_output = '\nCommand Output Parsing Error:\n Input does not contain 3 lines\nCommand:\n hello world\nCommand Output:\n some output line 1\n more output line 2\n'
assert (capsys.readouterr().out == expected_output)
| -4,493,378,662,148,309,000
|
Parser errors on output from check_output.
|
tests/integrations/subprocess/test_Subprocess__parse_output.py
|
test_call_with_parser_error
|
pybee/briefcase
|
python
|
def test_call_with_parser_error(mock_sub, capsys):
with pytest.raises(CommandOutputParseError, match='Unable to parse command output: Input does not contain 3 lines'):
mock_sub.parse_output(third_line_parser, ['hello', 'world'])
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], text=True)
expected_output = '\nCommand Output Parsing Error:\n Input does not contain 3 lines\nCommand:\n hello world\nCommand Output:\n some output line 1\n more output line 2\n'
assert (capsys.readouterr().out == expected_output)
|
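Taken together, the four tests above pin down the contract of parse_output: it forwards to check_output (applying a text=True default), passes extra keyword arguments through unchanged, returns whatever the parser returns, and converts a ParseError into a CommandOutputParseError while printing a diagnostic block. A sketch of an implementation satisfying that contract (a reading of the tests, not Briefcase's actual code; ParseError and CommandOutputParseError are assumed importable from the subprocess integration) might be:

def parse_output(self, output_parser, args, **kwargs):
    'Run a command and pass its captured output through output_parser.'
    output = self.check_output(args, **kwargs)
    try:
        return output_parser(output)
    except ParseError as e:
        # Surface the raw output to aid debugging, then re-raise in a
        # form callers can handle uniformly.
        print()
        print('Command Output Parsing Error:')
        print(f'    {e}')
        print('Command:')
        print(f"    {' '.join(str(arg) for arg in args)}")
        print('Command Output:')
        for line in output.splitlines():
            print(f'    {line}')
        raise CommandOutputParseError(f'Unable to parse command output: {e}')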
@pytest.mark.parametrize('in_kwargs, kwargs', [({}, {'text': True}), ({'text': True}, {'text': True}), ({'text': False}, {'text': False}), ({'universal_newlines': False}, {'universal_newlines': False}), ({'universal_newlines': True}, {'universal_newlines': True})])
def test_text_eq_true_default_overriding(mock_sub, in_kwargs, kwargs):
    'If text or universal_newlines is explicitly provided, it should\n    override the text=True default.'
mock_sub.parse_output(splitlines_parser, ['hello', 'world'], **in_kwargs)
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], **kwargs)
| -8,395,795,399,331,432,000
|
If text or universal_newlines is explicitly provided, it should
override the text=True default.
|
tests/integrations/subprocess/test_Subprocess__parse_output.py
|
test_text_eq_true_default_overriding
|
pybee/briefcase
|
python
|
@pytest.mark.parametrize('in_kwargs, kwargs', [({}, {'text': True}), ({'text': True}, {'text': True}), ({'text': False}, {'text': False}), ({'universal_newlines': False}, {'universal_newlines': False}), ({'universal_newlines': True}, {'universal_newlines': True})])
def test_text_eq_true_default_overriding(mock_sub, in_kwargs, kwargs):
    'If text or universal_newlines is explicitly provided, it should\n    override the text=True default.'
mock_sub.parse_output(splitlines_parser, ['hello', 'world'], **in_kwargs)
mock_sub._subprocess.check_output.assert_called_with(['hello', 'world'], **kwargs)
|
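The parametrized cases above imply that the text=True default is injected only when the caller supplies neither text nor universal_newlines; one way to express that rule (an illustration, not the library's code) is:

def final_kwargs(**kwargs):
    # Inject the default only when the caller has not expressed a
    # preference through either spelling of the option.
    if 'text' not in kwargs and 'universal_newlines' not in kwargs:
        kwargs['text'] = True
    return kwargs

assert final_kwargs() == {'text': True}
assert final_kwargs(universal_newlines=False) == {'universal_newlines': False}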
def _execute(self, state: GlobalState) -> None:
'\n\n :param state:\n :return:\n '
if (state.get_current_instruction()['address'] in self.cache):
return
issues = self._analyze_state(state)
for issue in issues:
self.cache.add(issue.address)
self.issues.extend(issues)
| -3,896,410,076,598,750,000
|
:param state:
:return:
|
mythril/analysis/module/modules/dependence_on_predictable_vars.py
|
_execute
|
marcuswin/mythril
|
python
|
def _execute(self, state: GlobalState) -> None:
'\n\n :param state:\n :return:\n '
if (state.get_current_instruction()['address'] in self.cache):
return
issues = self._analyze_state(state)
for issue in issues:
self.cache.add(issue.address)
self.issues.extend(issues)
|
@staticmethod
def _analyze_state(state: GlobalState) -> list:
'\n\n :param state:\n :return:\n '
issues = []
if is_prehook():
opcode = state.get_current_instruction()['opcode']
if (opcode in final_ops):
for annotation in state.annotations:
if isinstance(annotation, PredictablePathAnnotation):
if annotation.add_constraints:
constraints = (state.world_state.constraints + annotation.add_constraints)
else:
constraints = copy(state.world_state.constraints)
try:
transaction_sequence = solver.get_transaction_sequence(state, constraints)
except UnsatError:
continue
                    description = (('The ' + annotation.operation) + ' is used to determine a control flow decision. ')
description += "Note that the values of variables like coinbase, gaslimit, block number and timestamp are predictable and can be manipulated by a malicious miner. Also keep in mind that attackers know hashes of earlier blocks. Don't use any of those environment variables for random number generation or to make critical control flow decisions."
'\n Usually report low severity except in cases where the hash of a previous block is used to\n determine control flow. \n '
severity = ('Medium' if ('hash' in annotation.operation) else 'Low')
                    '\n                    Note: We report the location of the JUMPI that led to this path. Usually this maps to an if or\n                    require statement.\n                    '
swc_id = (TIMESTAMP_DEPENDENCE if ('timestamp' in annotation.operation) else WEAK_RANDOMNESS)
issue = Issue(contract=state.environment.active_account.contract_name, function_name=state.environment.active_function_name, address=annotation.location, swc_id=swc_id, bytecode=state.environment.code.bytecode, title='Dependence on predictable environment variable', severity=severity, description_head='A control flow decision is made based on a predictable variable.', description_tail=description, gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used), transaction_sequence=transaction_sequence)
issues.append(issue)
elif (opcode == 'JUMPI'):
for annotation in state.mstate.stack[(- 2)].annotations:
if isinstance(annotation, PredictableValueAnnotation):
state.annotate(PredictablePathAnnotation(annotation.operation, state.get_current_instruction()['address'], add_constraints=annotation.add_constraints))
break
elif (opcode == 'BLOCKHASH'):
param = state.mstate.stack[(- 1)]
try:
constraint = [ULT(param, state.environment.block_number), ULT(state.environment.block_number, symbol_factory.BitVecVal((2 ** 255), 256))]
solver.get_model((state.world_state.constraints + constraint))
state.annotate(OldBlockNumberUsedAnnotation(constraint))
except UnsatError:
pass
else:
opcode = state.environment.code.instruction_list[(state.mstate.pc - 1)]['opcode']
if (opcode == 'BLOCKHASH'):
annotations = cast(List[OldBlockNumberUsedAnnotation], list(state.get_annotations(OldBlockNumberUsedAnnotation)))
if len(annotations):
state.mstate.stack[(- 1)].annotate(PredictableValueAnnotation('block hash of a previous block', add_constraints=annotations[0].block_constraints))
else:
state.mstate.stack[(- 1)].annotate(PredictableValueAnnotation('block.{} environment variable'.format(opcode.lower())))
return issues
| -7,975,389,300,939,921,000
|
:param state:
:return:
|
mythril/analysis/module/modules/dependence_on_predictable_vars.py
|
_analyze_state
|
marcuswin/mythril
|
python
|
@staticmethod
def _analyze_state(state: GlobalState) -> list:
'\n\n :param state:\n :return:\n '
issues = []
if is_prehook():
opcode = state.get_current_instruction()['opcode']
if (opcode in final_ops):
for annotation in state.annotations:
if isinstance(annotation, PredictablePathAnnotation):
if annotation.add_constraints:
constraints = (state.world_state.constraints + annotation.add_constraints)
else:
constraints = copy(state.world_state.constraints)
try:
transaction_sequence = solver.get_transaction_sequence(state, constraints)
except UnsatError:
continue
                    description = (('The ' + annotation.operation) + ' is used to determine a control flow decision. ')
description += "Note that the values of variables like coinbase, gaslimit, block number and timestamp are predictable and can be manipulated by a malicious miner. Also keep in mind that attackers know hashes of earlier blocks. Don't use any of those environment variables for random number generation or to make critical control flow decisions."
'\n Usually report low severity except in cases where the hash of a previous block is used to\n determine control flow. \n '
severity = ('Medium' if ('hash' in annotation.operation) else 'Low')
                    '\n                    Note: We report the location of the JUMPI that led to this path. Usually this maps to an if or\n                    require statement.\n                    '
swc_id = (TIMESTAMP_DEPENDENCE if ('timestamp' in annotation.operation) else WEAK_RANDOMNESS)
issue = Issue(contract=state.environment.active_account.contract_name, function_name=state.environment.active_function_name, address=annotation.location, swc_id=swc_id, bytecode=state.environment.code.bytecode, title='Dependence on predictable environment variable', severity=severity, description_head='A control flow decision is made based on a predictable variable.', description_tail=description, gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used), transaction_sequence=transaction_sequence)
issues.append(issue)
elif (opcode == 'JUMPI'):
for annotation in state.mstate.stack[(- 2)].annotations:
if isinstance(annotation, PredictableValueAnnotation):
state.annotate(PredictablePathAnnotation(annotation.operation, state.get_current_instruction()['address'], add_constraints=annotation.add_constraints))
break
elif (opcode == 'BLOCKHASH'):
param = state.mstate.stack[(- 1)]
try:
constraint = [ULT(param, state.environment.block_number), ULT(state.environment.block_number, symbol_factory.BitVecVal((2 ** 255), 256))]
solver.get_model((state.world_state.constraints + constraint))
state.annotate(OldBlockNumberUsedAnnotation(constraint))
except UnsatError:
pass
else:
opcode = state.environment.code.instruction_list[(state.mstate.pc - 1)]['opcode']
if (opcode == 'BLOCKHASH'):
annotations = cast(List[OldBlockNumberUsedAnnotation], list(state.get_annotations(OldBlockNumberUsedAnnotation)))
if len(annotations):
state.mstate.stack[(- 1)].annotate(PredictableValueAnnotation('block hash of a previous block', add_constraints=annotations[0].block_constraints))
else:
state.mstate.stack[(- 1)].annotate(PredictableValueAnnotation('block.{} environment variable'.format(opcode.lower())))
return issues
|
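_analyze_state relies on three annotation types defined elsewhere in the module; minimal sketches of the shapes the code above assumes (field names inferred from usage, not copied from mythril) would be:

class PredictableValueAnnotation:
    'Marks a stack value derived from a predictable source.'
    def __init__(self, operation, add_constraints=None):
        self.operation = operation
        self.add_constraints = add_constraints

class PredictablePathAnnotation:
    'Marks a path whose control flow depends on a predictable value.'
    def __init__(self, operation, location, add_constraints=None):
        self.operation = operation
        self.location = location
        self.add_constraints = add_constraints

class OldBlockNumberUsedAnnotation:
    'Records that BLOCKHASH was applied to a plausibly old block number.'
    def __init__(self, block_constraints):
        self.block_constraints = block_constraints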
def test_sksurgerytextoverlay():
' Basic test to run the widget and make sure everything loads OK.'
if (sys.platform == 'darwin'):
pytest.skip('Test not working on Mac runner')
input_file = 'tests/data/test_video.avi'
gui = TextOverlayDemo(input_file)
gui.start()
| 6,263,612,313,655,906,000
|
Basic test to run the widget and make sure everything loads OK.
|
tests/test_sksurgerytextoverlay.py
|
test_sksurgerytextoverlay
|
SciKit-Surgery/scikit-surgeryutils
|
python
|
def test_sksurgerytextoverlay():
' '
if (sys.platform == 'darwin'):
pytest.skip('Test not working on Mac runner')
input_file = 'tests/data/test_video.avi'
gui = TextOverlayDemo(input_file)
gui.start()
|
@skipIf((PSYCOPG2_VERSION < (2, 7)), 'SQL string composition not available in psycopg2<2.7')
def test_composed_query(self):
'Checks whether execution of composed SQL string is traced'
query = SQL(' union all ').join([SQL('select {} as x').format(Literal('one')), SQL('select {} as x').format(Literal('two'))])
db = self._get_conn()
with db.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert (len(rows) == 2), rows
assert (rows[0][0] == 'one')
assert (rows[1][0] == 'two')
assert_is_measured(self.get_root_span())
self.assert_structure(dict(name='postgres.query', resource=query.as_string(db)))
| 4,491,225,874,725,807,600
|
Checks whether execution of composed SQL string is traced
|
tests/contrib/psycopg/test_psycopg.py
|
test_composed_query
|
discord/dd-trace-py
|
python
|
@skipIf((PSYCOPG2_VERSION < (2, 7)), 'SQL string composition not available in psycopg2<2.7')
def test_composed_query(self):
query = SQL(' union all ').join([SQL('select {} as x').format(Literal('one')), SQL('select {} as x').format(Literal('two'))])
db = self._get_conn()
with db.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert (len(rows) == 2), rows
assert (rows[0][0] == 'one')
assert (rows[1][0] == 'two')
assert_is_measured(self.get_root_span())
self.assert_structure(dict(name='postgres.query', resource=query.as_string(db)))
|
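For readers unfamiliar with psycopg2.sql, the composed query above only becomes a plain SQL string once a connection (or cursor) supplies the quoting context, which is why the assertion calls query.as_string(db). Roughly:

from psycopg2.sql import SQL, Literal

query = SQL(' union all ').join([
    SQL('select {} as x').format(Literal('one')),
    SQL('select {} as x').format(Literal('two')),
])
# Given a live connection `conn`, query.as_string(conn) yields:
#   "select 'one' as x union all select 'two' as x"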
@skipIf((PSYCOPG2_VERSION < (2, 7)), 'SQL string composition not available in psycopg2<2.7')
def test_composed_query_identifier(self):
'Checks whether execution of composed SQL string is traced'
db = self._get_conn()
with db.cursor() as cur:
cur.execute('CREATE TEMP TABLE test (id serial PRIMARY KEY, name varchar(12) NOT NULL UNIQUE);')
cur.execute('INSERT INTO test (name) VALUES (%s);', ('test_case',))
spans = self.get_spans()
assert (len(spans) == 2)
self.reset()
query = SQL('select {}, {} from {}').format(Identifier('id'), Identifier('name'), Identifier('test'))
cur.execute(query=query)
rows = cur.fetchall()
assert (rows == [(1, 'test_case')])
assert_is_measured(self.get_root_span())
self.assert_structure(dict(name='postgres.query', resource=query.as_string(db)))
| 6,862,115,182,421,737,000
|
Checks whether execution of composed SQL string is traced
|
tests/contrib/psycopg/test_psycopg.py
|
test_composed_query_identifier
|
discord/dd-trace-py
|
python
|
@skipIf((PSYCOPG2_VERSION < (2, 7)), 'SQL string composition not available in psycopg2<2.7')
def test_composed_query_identifier(self):
db = self._get_conn()
with db.cursor() as cur:
cur.execute('CREATE TEMP TABLE test (id serial PRIMARY KEY, name varchar(12) NOT NULL UNIQUE);')
cur.execute('INSERT INTO test (name) VALUES (%s);', ('test_case',))
spans = self.get_spans()
assert (len(spans) == 2)
self.reset()
query = SQL('select {}, {} from {}').format(Identifier('id'), Identifier('name'), Identifier('test'))
cur.execute(query=query)
rows = cur.fetchall()
assert (rows == [(1, 'test_case')])
assert_is_measured(self.get_root_span())
self.assert_structure(dict(name='postgres.query', resource=query.as_string(db)))
|
@snapshot()
@skipIf((PSYCOPG2_VERSION < (2, 7)), 'SQL string composition not available in psycopg2<2.7')
def test_composed_query_encoding(self):
'Checks whether execution of composed SQL string is traced'
import logging
logger = logging.getLogger()
logger.level = logging.DEBUG
query = SQL(' union all ').join([SQL("select 'one' as x"), SQL("select 'two' as x")])
conn = psycopg2.connect(**POSTGRES_CONFIG)
with conn.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert (len(rows) == 2), rows
assert (rows[0][0] == 'one')
assert (rows[1][0] == 'two')
| 8,238,151,430,507,487,000
|
Checks whether execution of composed SQL string is traced
|
tests/contrib/psycopg/test_psycopg.py
|
test_composed_query_encoding
|
discord/dd-trace-py
|
python
|
@snapshot()
@skipIf((PSYCOPG2_VERSION < (2, 7)), 'SQL string composition not available in psycopg2<2.7')
def test_composed_query_encoding(self):
import logging
logger = logging.getLogger()
logger.level = logging.DEBUG
query = SQL(' union all ').join([SQL("select 'one' as x"), SQL("select 'two' as x")])
conn = psycopg2.connect(**POSTGRES_CONFIG)
with conn.cursor() as cur:
cur.execute(query=query)
rows = cur.fetchall()
assert (len(rows) == 2), rows
assert (rows[0][0] == 'one')
assert (rows[1][0] == 'two')
|
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE='mysvc'))
def test_user_specified_app_service(self):
'\n When a user specifies a service for the app\n The psycopg integration should not use it.\n '
from ddtrace import config
assert (config.service == 'mysvc')
conn = self._get_conn()
conn.cursor().execute("select 'blah'")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
assert (spans[0].service != 'mysvc')
| 4,316,217,354,407,632,400
|
When a user specifies a service for the app
The psycopg integration should not use it.
|
tests/contrib/psycopg/test_psycopg.py
|
test_user_specified_app_service
|
discord/dd-trace-py
|
python
|
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE='mysvc'))
def test_user_specified_app_service(self):
'\n When a user specifies a service for the app\n The psycopg integration should not use it.\n '
from ddtrace import config
assert (config.service == 'mysvc')
conn = self._get_conn()
conn.cursor().execute("select 'blah'")
spans = self.get_spans()
self.assertEqual(len(spans), 1)
assert (spans[0].service != 'mysvc')
|
def dcan2fmriprepx(dcan_dir, out_dir, sub_id):
'\n dcan2fmriprep(dcan_dir,out_dir)\n '
sess = glob.glob((((dcan_dir + '/') + sub_id) + '/s*'))
ses_id = []
ses_id = [j.split('ses-')[1] for j in sess]
for ses in ses_id:
anat_dirx = (((((dcan_dir + '/') + sub_id) + '/ses-') + ses) + '/files/MNINonLinear/')
anatdir = (((((out_dir + '/') + sub_id) + '/ses-') + ses) + '/anat/')
os.makedirs(anatdir, exist_ok=True)
sess = ('ses-' + ses)
tw1 = (anat_dirx + '/T1w.nii.gz')
brainmask = (anat_dirx + '/brainmask_fs.nii.gz')
ribbon = (anat_dirx + '/ribbon.nii.gz')
segm = (anat_dirx + '/aparc+aseg.nii.gz')
midR = glob.glob((anat_dirx + '/fsaverage_LR32k/*R.midthickness.32k_fs_LR.surf.gii'))[0]
midL = glob.glob((anat_dirx + '/fsaverage_LR32k/*L.midthickness.32k_fs_LR.surf.gii'))[0]
infR = glob.glob((anat_dirx + '/fsaverage_LR32k/*R.inflated.32k_fs_LR.surf.gii'))[0]
infL = glob.glob((anat_dirx + '/fsaverage_LR32k/*L.inflated.32k_fs_LR.surf.gii'))[0]
pialR = glob.glob((anat_dirx + '/fsaverage_LR32k/*R.pial.32k_fs_LR.surf.gii'))[0]
pialL = glob.glob((anat_dirx + '/fsaverage_LR32k/*L.pial.32k_fs_LR.surf.gii'))[0]
whiteR = glob.glob((anat_dirx + '/fsaverage_LR32k/*R.white.32k_fs_LR.surf.gii'))[0]
whiteL = glob.glob((anat_dirx + '/fsaverage_LR32k/*L.white.32k_fs_LR.surf.gii'))[0]
dcanimages = [tw1, segm, ribbon, brainmask, tw1, tw1, midL, midR, pialL, pialR, whiteL, whiteR, infL, infR]
t1wim = ((((anatdir + sub_id) + '_') + sess) + '_desc-preproc_T1w.nii.gz')
t1seg = ((((anatdir + sub_id) + '_') + sess) + '_dseg.nii.gz')
t1ribbon = ((((anatdir + sub_id) + '_') + sess) + '_desc-ribbon_T1w.nii.gz')
t1brainm = ((((anatdir + sub_id) + '_') + sess) + '_desc-brain_mask.nii.gz')
regfile1 = ((((anatdir + sub_id) + '_') + sess) + '_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5')
regfile2 = ((((anatdir + sub_id) + '_') + sess) + '_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5')
lMid = ((((anatdir + sub_id) + '_') + sess) + '_hemi-L_midthickness.surf.gii')
rMid = ((((anatdir + sub_id) + '_') + sess) + '_hemi-R_midthickness.surf.gii')
lpial = ((((anatdir + sub_id) + '_') + sess) + '_hemi-L_pial.surf.gii')
rpial = ((((anatdir + sub_id) + '_') + sess) + '_hemi-R_pial.surf.gii')
lwhite = ((((anatdir + sub_id) + '_') + sess) + '_hemi-L_smoothwm.surf.gii')
rwhite = ((((anatdir + sub_id) + '_') + sess) + '_hemi-R_smoothwm.surf.gii')
linf = ((((anatdir + sub_id) + '_') + sess) + '_hemi-L_inflated.surf.gii')
rinf = ((((anatdir + sub_id) + '_') + sess) + '_hemi-R_inflated.surf.gii')
newanatfiles = [t1wim, t1seg, t1ribbon, t1brainm, regfile1, regfile2, lMid, rMid, lpial, rpial, lwhite, rwhite, linf, rinf]
for (i, j) in zip(dcanimages, newanatfiles):
symlinkfiles(i, j)
wmmask = glob.glob((anat_dirx + '/wm_2mm_*_mask_eroded.nii.gz'))[0]
csfmask = glob.glob((anat_dirx + '/vent_2mm_*_mask_eroded.nii.gz'))[0]
tw1tonative = (anat_dirx + 'xfms/T1w_to_MNI_0GenericAffine.mat')
func_dirx = (((((dcan_dir + '/') + sub_id) + '/ses-') + ses_id[0]) + '/files/MNINonLinear/Results/')
taskd = glob.glob((func_dirx + 'task-*'))
taskid = []
for k in taskd:
if (not os.path.isfile(k)):
taskid.append(os.path.basename(k).split('-')[1])
func_dir = (((((out_dir + '/') + sub_id) + '/ses-') + ses) + '/func/')
os.makedirs(func_dir, exist_ok=True)
ses_id = ('ses-' + ses)
for ttt in taskid:
taskdir = ('task-' + ttt)
taskname = re.split('(\\d+)', ttt)[0]
run_id = ('_run-' + str(re.split('(\\d+)', ttt)[1]))
func_dirxx = (func_dirx + taskdir)
sbref = (((func_dirxx + '/') + taskdir) + '_SBRef.nii.gz')
volume = (((func_dirxx + '/') + taskdir) + '.nii.gz')
brainmask = (func_dirxx + '/brainmask_fs.2.0.nii.gz')
dtsereis = (((func_dirxx + '/') + taskdir) + '_Atlas.dtseries.nii')
motionp = (func_dirxx + '/Movement_Regressors.txt')
rmsdx = (func_dirxx + '/Movement_AbsoluteRMS.txt')
mvreg = pd.read_csv(motionp, header=None, delimiter='\\s+')
mvreg = mvreg.iloc[:, 0:6]
mvreg.columns = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']
mvreg['rot_x'] = ((mvreg['rot_x'] * np.pi) / 180)
mvreg['rot_y'] = ((mvreg['rot_y'] * np.pi) / 180)
mvreg['rot_z'] = ((mvreg['rot_z'] * np.pi) / 180)
csfreg = extractreg(mask=csfmask, nifti=volume)
wmreg = extractreg(mask=wmmask, nifti=volume)
gsreg = extractreg(mask=brainmask, nifti=volume)
rsmd = np.loadtxt(rmsdx)
brainreg = pd.DataFrame({'global_signal': gsreg, 'white_matter': wmreg, 'csf': csfreg, 'rmsd': rsmd})
regressors = pd.concat([mvreg, brainreg], axis=1)
dcanfunfiles = [sbref, dtsereis, tw1tonative, tw1tonative]
tr = nb.load(volume).header.get_zooms()[(- 1)]
            jsontis = {'RepetitionTime': float(tr), 'TaskName': taskname}
json2 = {'grayordinates': '91k', 'space': 'HCP grayordinates', 'surface': 'fsLR', 'surface_density': '32k', 'volume': 'MNI152NLin6Asym'}
boldjson = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_space-MNI152NLin6Asym_desc-preproc_bold.json')
confreg = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_desc-confounds_timeseries.tsv')
confregj = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_desc-confounds_timeseries.json')
boldref = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_space-MNI152NLin6Asym_boldref.nii.gz')
dttseriesx = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_space-fsLR_den-91k_bold.dtseries.nii')
dttseriesj = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_space-fsLR_den-91k_bold.dtseries.json')
native2t1w = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_from-scanner_to-T1w_mode-image_xfm.txt')
t12native = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_from-T1w_to-scanner_mode-image_xfm.txt')
fmfuncfiles = [boldref, dttseriesx, native2t1w, t12native]
for (jj, kk) in zip(dcanfunfiles, fmfuncfiles):
symlinkfiles(jj, kk)
figdir = (((out_dir + '/') + sub_id) + '/figures/')
os.makedirs(figdir, exist_ok=True)
bbreg = (((((((figdir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_desc-bbregister_bold.svg')
bbreg = bbregplot(fixed_image=tw1, moving_image=boldref, out_file=bbreg, contour=ribbon)
writejson(jsontis, boldjson)
writejson(json2, dttseriesj)
writejson(json2, confregj)
regressors.to_csv(confreg, sep='\t', index=False)
dcanjosn = {'Name': 'ABCDDCAN', 'BIDSVersion': '1.4.0', 'DatasetType': 'derivative', 'GeneratedBy': [{'Name': 'DCAN', 'Version': '0.0.4', 'CodeURL': 'https://github.com/DCAN-Labs/abcd-hcp-pipeline'}]}
writejson(dcanjosn, (out_dir + '/dataset_description.json'))
return dcanjosn
| 8,666,387,277,339,448,000
|
dcan2fmriprep(dcan_dir,out_dir)
|
xcp_abcd/utils/dcan2fmriprep.py
|
dcan2fmriprepx
|
PennLINC/xcp_abcd
|
python
|
def dcan2fmriprepx(dcan_dir, out_dir, sub_id):
'\n \n '
sess = glob.glob((((dcan_dir + '/') + sub_id) + '/s*'))
ses_id = []
ses_id = [j.split('ses-')[1] for j in sess]
for ses in ses_id:
anat_dirx = (((((dcan_dir + '/') + sub_id) + '/ses-') + ses) + '/files/MNINonLinear/')
anatdir = (((((out_dir + '/') + sub_id) + '/ses-') + ses) + '/anat/')
os.makedirs(anatdir, exist_ok=True)
sess = ('ses-' + ses)
tw1 = (anat_dirx + '/T1w.nii.gz')
brainmask = (anat_dirx + '/brainmask_fs.nii.gz')
ribbon = (anat_dirx + '/ribbon.nii.gz')
segm = (anat_dirx + '/aparc+aseg.nii.gz')
midR = glob.glob((anat_dirx + '/fsaverage_LR32k/*R.midthickness.32k_fs_LR.surf.gii'))[0]
midL = glob.glob((anat_dirx + '/fsaverage_LR32k/*L.midthickness.32k_fs_LR.surf.gii'))[0]
infR = glob.glob((anat_dirx + '/fsaverage_LR32k/*R.inflated.32k_fs_LR.surf.gii'))[0]
infL = glob.glob((anat_dirx + '/fsaverage_LR32k/*L.inflated.32k_fs_LR.surf.gii'))[0]
pialR = glob.glob((anat_dirx + '/fsaverage_LR32k/*R.pial.32k_fs_LR.surf.gii'))[0]
pialL = glob.glob((anat_dirx + '/fsaverage_LR32k/*L.pial.32k_fs_LR.surf.gii'))[0]
whiteR = glob.glob((anat_dirx + '/fsaverage_LR32k/*R.white.32k_fs_LR.surf.gii'))[0]
whiteL = glob.glob((anat_dirx + '/fsaverage_LR32k/*L.white.32k_fs_LR.surf.gii'))[0]
dcanimages = [tw1, segm, ribbon, brainmask, tw1, tw1, midL, midR, pialL, pialR, whiteL, whiteR, infL, infR]
t1wim = ((((anatdir + sub_id) + '_') + sess) + '_desc-preproc_T1w.nii.gz')
t1seg = ((((anatdir + sub_id) + '_') + sess) + '_dseg.nii.gz')
t1ribbon = ((((anatdir + sub_id) + '_') + sess) + '_desc-ribbon_T1w.nii.gz')
t1brainm = ((((anatdir + sub_id) + '_') + sess) + '_desc-brain_mask.nii.gz')
regfile1 = ((((anatdir + sub_id) + '_') + sess) + '_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5')
regfile2 = ((((anatdir + sub_id) + '_') + sess) + '_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5')
lMid = ((((anatdir + sub_id) + '_') + sess) + '_hemi-L_midthickness.surf.gii')
rMid = ((((anatdir + sub_id) + '_') + sess) + '_hemi-R_midthickness.surf.gii')
lpial = ((((anatdir + sub_id) + '_') + sess) + '_hemi-L_pial.surf.gii')
rpial = ((((anatdir + sub_id) + '_') + sess) + '_hemi-R_pial.surf.gii')
lwhite = ((((anatdir + sub_id) + '_') + sess) + '_hemi-L_smoothwm.surf.gii')
rwhite = ((((anatdir + sub_id) + '_') + sess) + '_hemi-R_smoothwm.surf.gii')
linf = ((((anatdir + sub_id) + '_') + sess) + '_hemi-L_inflated.surf.gii')
rinf = ((((anatdir + sub_id) + '_') + sess) + '_hemi-R_inflated.surf.gii')
newanatfiles = [t1wim, t1seg, t1ribbon, t1brainm, regfile1, regfile2, lMid, rMid, lpial, rpial, lwhite, rwhite, linf, rinf]
for (i, j) in zip(dcanimages, newanatfiles):
symlinkfiles(i, j)
wmmask = glob.glob((anat_dirx + '/wm_2mm_*_mask_eroded.nii.gz'))[0]
csfmask = glob.glob((anat_dirx + '/vent_2mm_*_mask_eroded.nii.gz'))[0]
tw1tonative = (anat_dirx + 'xfms/T1w_to_MNI_0GenericAffine.mat')
func_dirx = (((((dcan_dir + '/') + sub_id) + '/ses-') + ses_id[0]) + '/files/MNINonLinear/Results/')
taskd = glob.glob((func_dirx + 'task-*'))
taskid = []
for k in taskd:
if (not os.path.isfile(k)):
taskid.append(os.path.basename(k).split('-')[1])
func_dir = (((((out_dir + '/') + sub_id) + '/ses-') + ses) + '/func/')
os.makedirs(func_dir, exist_ok=True)
ses_id = ('ses-' + ses)
for ttt in taskid:
taskdir = ('task-' + ttt)
taskname = re.split('(\\d+)', ttt)[0]
run_id = ('_run-' + str(re.split('(\\d+)', ttt)[1]))
func_dirxx = (func_dirx + taskdir)
sbref = (((func_dirxx + '/') + taskdir) + '_SBRef.nii.gz')
volume = (((func_dirxx + '/') + taskdir) + '.nii.gz')
brainmask = (func_dirxx + '/brainmask_fs.2.0.nii.gz')
dtsereis = (((func_dirxx + '/') + taskdir) + '_Atlas.dtseries.nii')
motionp = (func_dirxx + '/Movement_Regressors.txt')
rmsdx = (func_dirxx + '/Movement_AbsoluteRMS.txt')
mvreg = pd.read_csv(motionp, header=None, delimiter='\\s+')
mvreg = mvreg.iloc[:, 0:6]
mvreg.columns = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']
mvreg['rot_x'] = ((mvreg['rot_x'] * np.pi) / 180)
mvreg['rot_y'] = ((mvreg['rot_y'] * np.pi) / 180)
mvreg['rot_z'] = ((mvreg['rot_z'] * np.pi) / 180)
csfreg = extractreg(mask=csfmask, nifti=volume)
wmreg = extractreg(mask=wmmask, nifti=volume)
gsreg = extractreg(mask=brainmask, nifti=volume)
rsmd = np.loadtxt(rmsdx)
brainreg = pd.DataFrame({'global_signal': gsreg, 'white_matter': wmreg, 'csf': csfreg, 'rmsd': rsmd})
regressors = pd.concat([mvreg, brainreg], axis=1)
dcanfunfiles = [sbref, dtsereis, tw1tonative, tw1tonative]
tr = nb.load(volume).header.get_zooms()[(- 1)]
            jsontis = {'RepetitionTime': float(tr), 'TaskName': taskname}
json2 = {'grayordinates': '91k', 'space': 'HCP grayordinates', 'surface': 'fsLR', 'surface_density': '32k', 'volume': 'MNI152NLin6Asym'}
boldjson = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_space-MNI152NLin6Asym_desc-preproc_bold.json')
confreg = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_desc-confounds_timeseries.tsv')
confregj = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_desc-confounds_timeseries.json')
boldref = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_space-MNI152NLin6Asym_boldref.nii.gz')
dttseriesx = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_space-fsLR_den-91k_bold.dtseries.nii')
dttseriesj = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_space-fsLR_den-91k_bold.dtseries.json')
native2t1w = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_from-scanner_to-T1w_mode-image_xfm.txt')
t12native = (((((((func_dir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_from-T1w_to-scanner_mode-image_xfm.txt')
fmfuncfiles = [boldref, dttseriesx, native2t1w, t12native]
for (jj, kk) in zip(dcanfunfiles, fmfuncfiles):
symlinkfiles(jj, kk)
figdir = (((out_dir + '/') + sub_id) + '/figures/')
os.makedirs(figdir, exist_ok=True)
bbreg = (((((((figdir + sub_id) + '_') + ses_id) + '_task-') + taskname) + run_id) + '_desc-bbregister_bold.svg')
bbreg = bbregplot(fixed_image=tw1, moving_image=boldref, out_file=bbreg, contour=ribbon)
writejson(jsontis, boldjson)
writejson(json2, dttseriesj)
writejson(json2, confregj)
regressors.to_csv(confreg, sep='\t', index=False)
dcanjosn = {'Name': 'ABCDDCAN', 'BIDSVersion': '1.4.0', 'DatasetType': 'derivative', 'GeneratedBy': [{'Name': 'DCAN', 'Version': '0.0.4', 'CodeURL': 'https://github.com/DCAN-Labs/abcd-hcp-pipeline'}]}
writejson(dcanjosn, (out_dir + '/dataset_description.json'))
return dcanjosn
|
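dcan2fmriprepx leans on several helpers (symlinkfiles, extractreg, writejson, bbregplot) that live elsewhere in the package; two of them are simple enough to sketch from their call sites (the bodies below are assumptions, not xcp_abcd's implementations):

import json
import os

def writejson(data, outfile):
    # Serialize a dict to a JSON sidecar file.
    with open(outfile, 'w') as f:
        json.dump(data, f, indent=2)
    return outfile

def symlinkfiles(source, dest):
    # Link (or re-link) a source file into the BIDS-style output tree.
    if os.path.islink(dest) or os.path.exists(dest):
        os.remove(dest)
    os.symlink(os.path.abspath(source), dest)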
def copyfileobj_example(source, dest, buffer_size=((1024 * 1024) * 1024)):
' \n Copy a file from source to dest. source and dest\n must be file-like objects, i.e. any object with a read or\n write method, like for example StringIO.\n '
while True:
copy_buffer = source.read(buffer_size)
if (not copy_buffer):
break
dest.write(copy_buffer)
| -4,312,684,828,816,021,500
|
Copy a file from source to dest. source and dest
must be file-like objects, i.e. any object with a read or
write method, like for example StringIO.
|
xcp_abcd/utils/dcan2fmriprep.py
|
copyfileobj_example
|
PennLINC/xcp_abcd
|
python
|
def copyfileobj_example(source, dest, buffer_size=((1024 * 1024) * 1024)):
' \n Copy a file from source to dest. source and dest\n must be file-like objects, i.e. any object with a read or\n write method, like for example StringIO.\n '
while True:
copy_buffer = source.read(buffer_size)
if (not copy_buffer):
break
dest.write(copy_buffer)
|
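A quick usage check with in-memory streams (note the default buffer_size above is 1 GiB, so a small explicit buffer is needed to exercise more than one loop iteration):

import io

src = io.BytesIO(b'hello world')
dst = io.BytesIO()
copyfileobj_example(src, dst, buffer_size=4)  # forces several read/write cycles
assert dst.getvalue() == b'hello world'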
def forward(self, imgs, bboxes, labels, scale):
'Forward Faster R-CNN and calculate losses.\n\n Here are notations used.\n\n * :math:`N` is the batch size.\n * :math:`R` is the number of bounding boxes per image.\n\n Currently, only :math:`N=1` is supported.\n\n Args:\n imgs (~torch.autograd.Variable): A variable with a batch of images.\n bboxes (~torch.autograd.Variable): A batch of bounding boxes.\n Its shape is :math:`(N, R, 4)`.\n labels (~torch.autograd..Variable): A batch of labels.\n Its shape is :math:`(N, R)`. The background is excluded from\n the definition, which means that the range of the value\n is :math:`[0, L - 1]`. :math:`L` is the number of foreground\n classes.\n scale (float): Amount of scaling applied to\n the raw image during preprocessing.\n\n Returns:\n namedtuple of 5 losses\n '
n = bboxes.shape[0]
if (n != 1):
raise ValueError('Currently only batch size 1 is supported.')
(_, _, H, W) = imgs.shape
img_size = (H, W)
features = self.faster_rcnn.extractor(imgs)
(rpn_locs, rpn_scores, rois, roi_indices, anchor) = self.faster_rcnn.rpn(features, img_size, scale)
bbox = bboxes[0]
label = labels[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
(sample_roi, gt_roi_loc, gt_roi_label) = self.proposal_target_creator(roi, at.tonumpy(bbox), at.tonumpy(label), self.loc_normalize_mean, self.loc_normalize_std)
sample_roi_index = t.zeros(len(sample_roi))
(roi_cls_loc, roi_score) = self.faster_rcnn.head(features, sample_roi, sample_roi_index)
(gt_rpn_loc, gt_rpn_label) = self.anchor_target_creator(at.tonumpy(bbox), anchor, img_size)
gt_rpn_label = at.totensor(gt_rpn_label).long()
gt_rpn_loc = at.totensor(gt_rpn_loc)
rpn_loc_loss = _fast_rcnn_loc_loss(rpn_loc, gt_rpn_loc, gt_rpn_label.data, self.rpn_sigma)
rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=(- 1))
_gt_rpn_label = gt_rpn_label[(gt_rpn_label > (- 1))]
_rpn_score = at.tonumpy(rpn_score)[(at.tonumpy(gt_rpn_label) > (- 1))]
self.rpn_cm.add(at.totensor(_rpn_score, False), _gt_rpn_label.data.long())
n_sample = roi_cls_loc.shape[0]
roi_cls_loc = roi_cls_loc.view(n_sample, (- 1), 4)
roi_loc = roi_cls_loc[(t.arange(0, n_sample).long().cuda(), at.totensor(gt_roi_label).long())]
gt_roi_label = at.totensor(gt_roi_label).long()
gt_roi_loc = at.totensor(gt_roi_loc)
roi_loc_loss = _fast_rcnn_loc_loss(roi_loc.contiguous(), gt_roi_loc, gt_roi_label.data, self.roi_sigma)
roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())
self.roi_cm.add(at.totensor(roi_score, False), gt_roi_label.data.long())
losses = [rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss]
losses = (losses + [sum(losses)])
return LossTuple(*losses)
| -7,758,097,655,763,915,000
|
Forward Faster R-CNN and calculate losses.
Here are notations used.
* :math:`N` is the batch size.
* :math:`R` is the number of bounding boxes per image.
Currently, only :math:`N=1` is supported.
Args:
imgs (~torch.autograd.Variable): A variable with a batch of images.
bboxes (~torch.autograd.Variable): A batch of bounding boxes.
Its shape is :math:`(N, R, 4)`.
labels (~torch.autograd.Variable): A batch of labels.
Its shape is :math:`(N, R)`. The background is excluded from
the definition, which means that the range of the value
is :math:`[0, L - 1]`. :math:`L` is the number of foreground
classes.
scale (float): Amount of scaling applied to
the raw image during preprocessing.
Returns:
namedtuple of 5 losses
|
baseline/fast_rcnn/trainer.py
|
forward
|
ITMO-NSS-team/LightObjRecEnsembler
|
python
|
def forward(self, imgs, bboxes, labels, scale):
'Forward Faster R-CNN and calculate losses.\n\n Here are notations used.\n\n * :math:`N` is the batch size.\n * :math:`R` is the number of bounding boxes per image.\n\n Currently, only :math:`N=1` is supported.\n\n Args:\n imgs (~torch.autograd.Variable): A variable with a batch of images.\n bboxes (~torch.autograd.Variable): A batch of bounding boxes.\n Its shape is :math:`(N, R, 4)`.\n labels (~torch.autograd..Variable): A batch of labels.\n Its shape is :math:`(N, R)`. The background is excluded from\n the definition, which means that the range of the value\n is :math:`[0, L - 1]`. :math:`L` is the number of foreground\n classes.\n scale (float): Amount of scaling applied to\n the raw image during preprocessing.\n\n Returns:\n namedtuple of 5 losses\n '
n = bboxes.shape[0]
if (n != 1):
raise ValueError('Currently only batch size 1 is supported.')
(_, _, H, W) = imgs.shape
img_size = (H, W)
features = self.faster_rcnn.extractor(imgs)
(rpn_locs, rpn_scores, rois, roi_indices, anchor) = self.faster_rcnn.rpn(features, img_size, scale)
bbox = bboxes[0]
label = labels[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
(sample_roi, gt_roi_loc, gt_roi_label) = self.proposal_target_creator(roi, at.tonumpy(bbox), at.tonumpy(label), self.loc_normalize_mean, self.loc_normalize_std)
sample_roi_index = t.zeros(len(sample_roi))
(roi_cls_loc, roi_score) = self.faster_rcnn.head(features, sample_roi, sample_roi_index)
(gt_rpn_loc, gt_rpn_label) = self.anchor_target_creator(at.tonumpy(bbox), anchor, img_size)
gt_rpn_label = at.totensor(gt_rpn_label).long()
gt_rpn_loc = at.totensor(gt_rpn_loc)
rpn_loc_loss = _fast_rcnn_loc_loss(rpn_loc, gt_rpn_loc, gt_rpn_label.data, self.rpn_sigma)
rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=(- 1))
_gt_rpn_label = gt_rpn_label[(gt_rpn_label > (- 1))]
_rpn_score = at.tonumpy(rpn_score)[(at.tonumpy(gt_rpn_label) > (- 1))]
self.rpn_cm.add(at.totensor(_rpn_score, False), _gt_rpn_label.data.long())
n_sample = roi_cls_loc.shape[0]
roi_cls_loc = roi_cls_loc.view(n_sample, (- 1), 4)
roi_loc = roi_cls_loc[(t.arange(0, n_sample).long().cuda(), at.totensor(gt_roi_label).long())]
gt_roi_label = at.totensor(gt_roi_label).long()
gt_roi_loc = at.totensor(gt_roi_loc)
roi_loc_loss = _fast_rcnn_loc_loss(roi_loc.contiguous(), gt_roi_loc, gt_roi_label.data, self.roi_sigma)
roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())
self.roi_cm.add(at.totensor(roi_score, False), gt_roi_label.data.long())
losses = [rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss]
losses = (losses + [sum(losses)])
return LossTuple(*losses)
|
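The forward pass delegates both localization losses to _fast_rcnn_loc_loss, which is not shown here. A common formulation consistent with the call sites, smooth L1 applied only to positive samples and normalized by the number of labeled samples, would be (a sketch, not necessarily this repository's exact code):

import torch as t

def _smooth_l1_loss(x, gt, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - gt)
    abs_diff = diff.abs()
    flag = (abs_diff < (1.0 / sigma2)).float()
    # Quadratic near zero, linear in the tails.
    y = flag * (sigma2 / 2.0) * (diff ** 2) + (1 - flag) * (abs_diff - 0.5 / sigma2)
    return y.sum()

def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
    # Regress box offsets only for positive (foreground) samples.
    in_weight = t.zeros(gt_loc.shape).cuda()
    in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1
    loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)
    # Normalize by the number of labeled (non-ignore) samples.
    loc_loss /= (gt_label >= 0).sum().float()
    return loc_loss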
def save(self, save_optimizer=False, save_path=None, **kwargs):
"serialize models include optimizer and other info\n return path where the model-file is stored.\n\n Args:\n save_optimizer (bool): whether save optimizer.state_dict().\n save_path (string): where to save model, if it's None, save_path\n is generate using time str and info from kwargs.\n \n Returns:\n save_path(str): the path to save models.\n "
save_dict = dict()
save_dict['model'] = self.faster_rcnn.state_dict()
save_dict['config'] = opt._state_dict()
save_dict['other_info'] = kwargs
save_dict['vis_info'] = self.vis.state_dict()
if save_optimizer:
save_dict['optimizer'] = self.optimizer.state_dict()
if (save_path is None):
timestr = time.strftime('%m%d%H%M')
save_path = ('checkpoints/fasterrcnn_%s' % timestr)
for (k_, v_) in kwargs.items():
save_path += ('_%s' % v_)
save_dir = os.path.dirname(save_path)
if (not os.path.exists(save_dir)):
os.makedirs(save_dir)
t.save(save_dict, save_path)
self.vis.save([self.vis.env])
return save_path
| -2,786,790,712,384,780,000
|
Serialize the model (and optionally the optimizer and other info) and
return the path where the model file is stored.
Args:
save_optimizer (bool): whether to save optimizer.state_dict().
save_path (string): where to save the model; if it's None, save_path
is generated using a time string and info from kwargs.
Returns:
save_path (str): the path where the model was saved.
|
baseline/fast_rcnn/trainer.py
|
save
|
ITMO-NSS-team/LightObjRecEnsembler
|
python
|
def save(self, save_optimizer=False, save_path=None, **kwargs):
"serialize models include optimizer and other info\n return path where the model-file is stored.\n\n Args:\n save_optimizer (bool): whether save optimizer.state_dict().\n save_path (string): where to save model, if it's None, save_path\n is generate using time str and info from kwargs.\n \n Returns:\n save_path(str): the path to save models.\n "
save_dict = dict()
save_dict['model'] = self.faster_rcnn.state_dict()
save_dict['config'] = opt._state_dict()
save_dict['other_info'] = kwargs
save_dict['vis_info'] = self.vis.state_dict()
if save_optimizer:
save_dict['optimizer'] = self.optimizer.state_dict()
if (save_path is None):
timestr = time.strftime('%m%d%H%M')
save_path = ('checkpoints/fasterrcnn_%s' % timestr)
for (k_, v_) in kwargs.items():
save_path += ('_%s' % v_)
save_dir = os.path.dirname(save_path)
if (not os.path.exists(save_dir)):
os.makedirs(save_dir)
t.save(save_dict, save_path)
self.vis.save([self.vis.env])
return save_path
|
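A typical call (hypothetical values; the extra kwargs become part of both the generated filename and the saved other_info):

# Saves to e.g. checkpoints/fasterrcnn_08151230_0.712
path = trainer.save(save_optimizer=True, best_map=0.712)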
def get_object(self):
    '\n        Retrieve the authenticated user.\n        '
return self.request.user
| -7,649,043,722,026,112,000
|
Retrieve the authenticated user.
|
app/user/views.py
|
get_object
|
xemperforya/recipe-app-api
|
python
|
def get_object(self):
'\n \n '
return self.request.user
|
@distributed_trace
def list(self, **kwargs: Any) -> Iterable['_models.ManagedClusterSnapshotListResult']:
'Gets a list of managed cluster snapshots in the specified subscription.\n\n Gets a list of managed cluster snapshots in the specified subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ManagedClusterSnapshotListResult or the result of\n cls(response)\n :rtype:\n ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshotListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if (not next_link):
request = build_list_request(subscription_id=self._config.subscription_id, template_url=self.list.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(subscription_id=self._config.subscription_id, template_url=next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = 'GET'
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterSnapshotListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
| -806,864,483,680,052,100
|
Gets a list of managed cluster snapshots in the specified subscription.
Gets a list of managed cluster snapshots in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterSnapshotListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshotListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_03_02_preview/operations/_managed_cluster_snapshots_operations.py
|
list
|
Hamster-Huey/azure-cli-extensions
|
python
|
@distributed_trace
def list(self, **kwargs: Any) -> Iterable['_models.ManagedClusterSnapshotListResult']:
'Gets a list of managed cluster snapshots in the specified subscription.\n\n Gets a list of managed cluster snapshots in the specified subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ManagedClusterSnapshotListResult or the result of\n cls(response)\n :rtype:\n ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshotListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if (not next_link):
request = build_list_request(subscription_id=self._config.subscription_id, template_url=self.list.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(subscription_id=self._config.subscription_id, template_url=next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = 'GET'
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterSnapshotListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
|
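Because the operation returns an azure.core.paging.ItemPaged, callers simply iterate it and additional pages are fetched lazily via the next_link plumbing above (the client variable below is a hypothetical, already-authenticated ContainerServiceClient):

for snapshot in client.managed_cluster_snapshots.list():
    print(snapshot.name)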
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable['_models.ManagedClusterSnapshotListResult']:
'Lists managed cluster snapshots in the specified subscription and resource group.\n\n Lists managed cluster snapshots in the specified subscription and resource group.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ManagedClusterSnapshotListResult or the result of\n cls(response)\n :rtype:\n ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshotListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if (not next_link):
request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=self.list_by_resource_group.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = 'GET'
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterSnapshotListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
| 6,987,555,572,338,603,000
|
Lists managed cluster snapshots in the specified subscription and resource group.
Lists managed cluster snapshots in the specified subscription and resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterSnapshotListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshotListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_03_02_preview/operations/_managed_cluster_snapshots_operations.py
|
list_by_resource_group
|
Hamster-Huey/azure-cli-extensions
|
python
|
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable['_models.ManagedClusterSnapshotListResult']:
'Lists managed cluster snapshots in the specified subscription and resource group.\n\n Lists managed cluster snapshots in the specified subscription and resource group.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ManagedClusterSnapshotListResult or the result of\n cls(response)\n :rtype:\n ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshotListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if (not next_link):
request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=self.list_by_resource_group.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, template_url=next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = 'GET'
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterSnapshotListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
|
@distributed_trace
def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> '_models.ManagedClusterSnapshot':
'Gets a managed cluster snapshot.\n\n Gets a managed cluster snapshot.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param resource_name: The name of the managed cluster resource.\n :type resource_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ManagedClusterSnapshot, or the result of cls(response)\n :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.get.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| 3,540,760,310,827,036,000
|
Gets a managed cluster snapshot.
Gets a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot
:raises: ~azure.core.exceptions.HttpResponseError
|
src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_03_02_preview/operations/_managed_cluster_snapshots_operations.py
|
get
|
Hamster-Huey/azure-cli-extensions
|
python
|
@distributed_trace
def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> '_models.ManagedClusterSnapshot':
'Gets a managed cluster snapshot.\n\n Gets a managed cluster snapshot.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param resource_name: The name of the managed cluster resource.\n :type resource_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ManagedClusterSnapshot, or the result of cls(response)\n :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.get.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
@distributed_trace
def create_or_update(self, resource_group_name: str, resource_name: str, parameters: '_models.ManagedClusterSnapshot', **kwargs: Any) -> '_models.ManagedClusterSnapshot':
'Creates or updates a managed cluster snapshot.\n\n Creates or updates a managed cluster snapshot.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param resource_name: The name of the managed cluster resource.\n :type resource_name: str\n :param parameters: The managed cluster snapshot to create or update.\n :type parameters:\n ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ManagedClusterSnapshot, or the result of cls(response)\n :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', 'application/json')
_json = self._serialize.body(parameters, 'ManagedClusterSnapshot')
request = build_create_or_update_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, content_type=content_type, json=_json, template_url=self.create_or_update.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 201]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if (response.status_code == 200):
deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)
if (response.status_code == 201):
deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| 3,594,859,589,582,269,400
|
Creates or updates a managed cluster snapshot.
Creates or updates a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: The managed cluster snapshot to create or update.
:type parameters:
~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot
:raises: ~azure.core.exceptions.HttpResponseError
|
src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_03_02_preview/operations/_managed_cluster_snapshots_operations.py
|
create_or_update
|
Hamster-Huey/azure-cli-extensions
|
python
|
@distributed_trace
def create_or_update(self, resource_group_name: str, resource_name: str, parameters: '_models.ManagedClusterSnapshot', **kwargs: Any) -> '_models.ManagedClusterSnapshot':
'Creates or updates a managed cluster snapshot.\n\n Creates or updates a managed cluster snapshot.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param resource_name: The name of the managed cluster resource.\n :type resource_name: str\n :param parameters: The managed cluster snapshot to create or update.\n :type parameters:\n ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ManagedClusterSnapshot, or the result of cls(response)\n :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', 'application/json')
_json = self._serialize.body(parameters, 'ManagedClusterSnapshot')
request = build_create_or_update_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, content_type=content_type, json=_json, template_url=self.create_or_update.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 201]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if (response.status_code == 200):
deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)
if (response.status_code == 201):
deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
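For orientation, a minimal usage sketch of the operation above follows. Nothing in it comes from the record except the create_or_update signature and the ManagedClusterSnapshot model path: the ContainerServiceClient wiring, the managed_cluster_snapshots attribute name (inferred from the operations class), the CreationData fields, and every identifier are assumptions or placeholders.

from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient
from azure.mgmt.containerservice.v2022_03_02_preview.models import (
    CreationData,
    ManagedClusterSnapshot,
)

# Placeholder subscription; attribute name assumed from
# ManagedClusterSnapshotsOperations.
client = ContainerServiceClient(
    DefaultAzureCredential(),
    "00000000-0000-0000-0000-000000000000",
    api_version="2022-03-02-preview",
)

# The snapshot payload points at its source cluster via creation_data.
snapshot = ManagedClusterSnapshot(
    location="eastus",
    creation_data=CreationData(
        source_resource_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/my-rg/providers/Microsoft.ContainerService"
            "/managedClusters/my-cluster"
        )
    ),
)

# Synchronous call; 200 and 201 both deserialize to ManagedClusterSnapshot.
result = client.managed_cluster_snapshots.create_or_update(
    resource_group_name="my-rg",
    resource_name="my-snapshot",
    parameters=snapshot,
)
print(result.id)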
@distributed_trace
def update_tags(self, resource_group_name: str, resource_name: str, parameters: '_models.TagsObject', **kwargs: Any) -> '_models.ManagedClusterSnapshot':
'Updates tags on a managed cluster snapshot.\n\n Updates tags on a managed cluster snapshot.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param resource_name: The name of the managed cluster resource.\n :type resource_name: str\n :param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.\n :type parameters: ~azure.mgmt.containerservice.v2022_03_02_preview.models.TagsObject\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ManagedClusterSnapshot, or the result of cls(response)\n :rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', 'application/json')
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, content_type=content_type, json=_json, template_url=self.update_tags.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| -3,153,889,745,219,867,000
|
Updates tags on a managed cluster snapshot.
Updates tags on a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.
:type parameters: ~azure.mgmt.containerservice.v2022_03_02_preview.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_03_02_preview.models.ManagedClusterSnapshot
:raises: ~azure.core.exceptions.HttpResponseError
|
src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_03_02_preview/operations/_managed_cluster_snapshots_operations.py
|
update_tags
|
Hamster-Huey/azure-cli-extensions
|
python
|
@distributed_trace
def update_tags(self, resource_group_name: str, resource_name: str, parameters: '_models.TagsObject', **kwargs: Any) -> '_models.ManagedClusterSnapshot':
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', 'application/json')
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, content_type=content_type, json=_json, template_url=self.update_tags.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterSnapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
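A corresponding sketch for the tags update, under the same assumed client wiring; TagsObject is the only model involved, and as the code above shows, update_tags accepts only a 200 response.

from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient
from azure.mgmt.containerservice.v2022_03_02_preview.models import TagsObject

client = ContainerServiceClient(
    DefaultAzureCredential(),
    "00000000-0000-0000-0000-000000000000",
    api_version="2022-03-02-preview",
)

# Replaces the tags on the snapshot and returns the updated resource.
updated = client.managed_cluster_snapshots.update_tags(
    resource_group_name="my-rg",
    resource_name="my-snapshot",
    parameters=TagsObject(tags={"env": "dev", "owner": "team-a"}),
)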
@distributed_trace
def delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> None:
'Deletes a managed cluster snapshot.\n\n Deletes a managed cluster snapshot.\n\n :param resource_group_name: The name of the resource group. The name is case insensitive.\n :type resource_group_name: str\n :param resource_name: The name of the managed cluster resource.\n :type resource_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.delete.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
| -6,312,127,759,750,229,000
|
Deletes a managed cluster snapshot.
Deletes a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
|
src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_03_02_preview/operations/_managed_cluster_snapshots_operations.py
|
delete
|
Hamster-Huey/azure-cli-extensions
|
python
|
@distributed_trace
def delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> None:
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.delete.metadata['url'])
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200, 204]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
|
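And a delete sketch with the same assumed client; the method returns None (or cls(response) when a custom cls is passed) and treats both 200 and 204 as success.

from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient

client = ContainerServiceClient(
    DefaultAzureCredential(),
    "00000000-0000-0000-0000-000000000000",
    api_version="2022-03-02-preview",
)

# No return value on success; any other status raises HttpResponseError.
client.managed_cluster_snapshots.delete(
    resource_group_name="my-rg",
    resource_name="my-snapshot",
)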
@property
def is_terminal(self) -> bool:
'True if the current state is a terminal state.'
if (self.life_cycle_state not in RUN_LIFE_CYCLE_STATES):
raise AirflowException('Unexpected life cycle state: {}: If the state has been introduced recently, please check the Databricks user guide for troubleshooting information'.format(self.life_cycle_state))
return (self.life_cycle_state in ('TERMINATED', 'SKIPPED', 'INTERNAL_ERROR'))
| 358,714,069,093,355,900
|
True if the current state is a terminal state.
|
airflow/providers/databricks/hooks/databricks.py
|
is_terminal
|
AMS-Kepler/airflow
|
python
|
@property
def is_terminal(self) -> bool:
if (self.life_cycle_state not in RUN_LIFE_CYCLE_STATES):
raise AirflowException('Unexpected life cycle state: {}: If the state has been introduced recently, please check the Databricks user guide for troubleshooting information'.format(self.life_cycle_state))
return (self.life_cycle_state in ('TERMINATED', 'SKIPPED', 'INTERNAL_ERROR'))
|
@property
def is_successful(self) -> bool:
'True if the result state is SUCCESS'
return (self.result_state == 'SUCCESS')
| 4,436,874,940,241,474,000
|
True if the result state is SUCCESS
|
airflow/providers/databricks/hooks/databricks.py
|
is_successful
|
AMS-Kepler/airflow
|
python
|
@property
def is_successful(self) -> bool:
return (self.result_state == 'SUCCESS')
|
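The two properties above are typically used together in a polling loop: is_terminal gates the wait, is_successful interprets the outcome. A sketch, assuming DatabricksHook.get_run_state(run_id) returns a RunState carrying these properties and a state_message field (neither the hook method nor that field appears in these records):

import time

from airflow.providers.databricks.hooks.databricks import DatabricksHook

hook = DatabricksHook(databricks_conn_id="databricks_default")
run_id = 42  # placeholder run id

# Poll until the life-cycle state is TERMINATED, SKIPPED or INTERNAL_ERROR,
# then use the result state to decide success or failure.
state = hook.get_run_state(run_id)
while not state.is_terminal:
    time.sleep(30)
    state = hook.get_run_state(run_id)

if state.is_successful:
    print("run succeeded")
else:
    print(f"run ended in {state.result_state}: {state.state_message}")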
def run_now(self, json: dict) -> int:
'\n Utility function to call the ``api/2.0/jobs/run-now`` endpoint.\n\n :param json: The data used in the body of the request to the ``run-now`` endpoint.\n :return: the run_id as an int\n :rtype: int\n '
response = self._do_api_call(RUN_NOW_ENDPOINT, json)
return response['run_id']
| -1,929,148,863,777,814,500
|
Utility function to call the ``api/2.0/jobs/run-now`` endpoint.
:param json: The data used in the body of the request to the ``run-now`` endpoint.
:return: the run_id as an int
:rtype: int
|
airflow/providers/databricks/hooks/databricks.py
|
run_now
|
AMS-Kepler/airflow
|
python
|
def run_now(self, json: dict) -> int:
response = self._do_api_call(RUN_NOW_ENDPOINT, json)
return response['run_id']
|
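A sketch of triggering an existing job through this helper. The job id and notebook parameters are placeholders, and the payload keys are those accepted by the Jobs run-now REST endpoint rather than anything defined in this record:

from airflow.providers.databricks.hooks.databricks import DatabricksHook

hook = DatabricksHook(databricks_conn_id="databricks_default")

# The dict is passed straight through as the run-now request body.
run_id = hook.run_now(
    {
        "job_id": 123,
        "notebook_params": {"run_date": "2022-01-01"},
    }
)
print(f"started run {run_id}")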
def submit_run(self, json: dict) -> int:
'\n Utility function to call the ``api/2.0/jobs/runs/submit`` endpoint.\n\n :param json: The data used in the body of the request to the ``submit`` endpoint.\n :return: the run_id as an int\n :rtype: int\n '
response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
return response['run_id']
| 6,492,600,274,998,970,000
|
Utility function to call the ``api/2.0/jobs/runs/submit`` endpoint.
:param json: The data used in the body of the request to the ``submit`` endpoint.
:return: the run_id as an int
:rtype: int
|
airflow/providers/databricks/hooks/databricks.py
|
submit_run
|
AMS-Kepler/airflow
|
python
|
def submit_run(self, json: dict) -> int:
response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
return response['run_id']
|
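submit_run differs from run_now in that it creates a one-time run instead of triggering a saved job, so the payload carries the full run spec. A sketch with placeholder cluster and notebook values, using keys from the runs/submit REST endpoint (all assumptions, not part of this record):

from airflow.providers.databricks.hooks.databricks import DatabricksHook

hook = DatabricksHook(databricks_conn_id="databricks_default")

# One-time run: cluster spec and task definition travel in the request body.
run_id = hook.submit_run(
    {
        "run_name": "adhoc-run",
        "new_cluster": {
            "spark_version": "10.4.x-scala2.12",
            "node_type_id": "i3.xlarge",
            "num_workers": 2,
        },
        "notebook_task": {"notebook_path": "/Shared/my-notebook"},
    }
)
print(f"submitted run {run_id}")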
def list_jobs(self, limit: int=25, offset: int=0, expand_tasks: bool=False) -> List[Dict[(str, Any)]]:
'\n Lists the jobs in the Databricks Job Service.\n\n :param limit: The limit/batch size used to retrieve jobs.\n :param offset: The offset of the first job to return, relative to the most recently created job.\n :param expand_tasks: Whether to include task and cluster details in the response.\n :return: A list of jobs.\n '
has_more = True
jobs = []
while has_more:
json = {'limit': limit, 'offset': offset, 'expand_tasks': expand_tasks}
response = self._do_api_call(LIST_JOBS_ENDPOINT, json)
jobs += (response['jobs'] if ('jobs' in response) else [])
has_more = response.get('has_more', False)
if has_more:
offset += len(response['jobs'])
return jobs
| -2,245,901,606,107,715,300
|
Lists the jobs in the Databricks Job Service.
:param limit: The limit/batch size used to retrieve jobs.
:param offset: The offset of the first job to return, relative to the most recently created job.
:param expand_tasks: Whether to include task and cluster details in the response.
:return: A list of jobs.
|
airflow/providers/databricks/hooks/databricks.py
|
list_jobs
|
AMS-Kepler/airflow
|
python
|
def list_jobs(self, limit: int=25, offset: int=0, expand_tasks: bool=False) -> List[Dict[(str, Any)]]:
has_more = True
jobs = []
while has_more:
json = {'limit': limit, 'offset': offset, 'expand_tasks': expand_tasks}
response = self._do_api_call(LIST_JOBS_ENDPOINT, json)
jobs += (response['jobs'] if ('jobs' in response) else [])
has_more = response.get('has_more', False)
if has_more:
offset += len(response['jobs'])
return jobs
|
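Because list_jobs advances the offset itself until has_more is false, a single call returns the complete job list; limit only controls the page size of each underlying request. A sketch (the job_id and settings keys reflect the Jobs list response shape and are assumptions here):

from airflow.providers.databricks.hooks.databricks import DatabricksHook

hook = DatabricksHook(databricks_conn_id="databricks_default")

# One call returns every job; pagination happens inside the helper.
for job in hook.list_jobs(limit=50, expand_tasks=False):
    print(job["job_id"], job["settings"]["name"])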