body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
d0004089ba2540c68d0439efafbbf23ecefec875dc40743a0f3bdbc4d989741c
|
def wait_loaded_tag(self, tag_name, timeout=WAIT_TIMEOUT):
'\n Helper function that blocks until the element with the given tag name\n is found on the page.\n '
self.wait_until((lambda driver: driver.find_element_by_tag_name(tag_name)), timeout)
|
Helper function that blocks until the element with the given tag name
is found on the page.
|
anaf/test.py
|
wait_loaded_tag
|
tovmeod/anaf
| 2
|
python
|
def wait_loaded_tag(self, tag_name, timeout=WAIT_TIMEOUT):
'\n Helper function that blocks until the element with the given tag name\n is found on the page.\n '
self.wait_until((lambda driver: driver.find_element_by_tag_name(tag_name)), timeout)
|
def wait_loaded_tag(self, tag_name, timeout=WAIT_TIMEOUT):
'\n Helper function that blocks until the element with the given tag name\n is found on the page.\n '
self.wait_until((lambda driver: driver.find_element_by_tag_name(tag_name)), timeout)<|docstring|>Helper function that blocks until the element with the given tag name
is found on the page.<|endoftext|>
|
7bd4083875cc77fddccf10b279735d6629c2554616ae9ba35ff5d0fb56d8f5e7
|
def wait_page_loaded(self):
'\n Block until page has started to load.\n '
from selenium.common.exceptions import TimeoutException
try:
self.wait_loaded_tag('body')
except TimeoutException:
pass
|
Block until page has started to load.
|
anaf/test.py
|
wait_page_loaded
|
tovmeod/anaf
| 2
|
python
|
def wait_page_loaded(self):
'\n \n '
from selenium.common.exceptions import TimeoutException
try:
self.wait_loaded_tag('body')
except TimeoutException:
pass
|
def wait_page_loaded(self):
'\n \n '
from selenium.common.exceptions import TimeoutException
try:
self.wait_loaded_tag('body')
except TimeoutException:
pass<|docstring|>Block until page has started to load.<|endoftext|>
|
0fdc0d11fb128c4092bb2d0949c9110e7efdafa50ab350c5e8204c85feb105c4
|
def VmFaultToleranceTooManyVMsOnHost(vim, *args, **kwargs):
'This fault is returned when a host has more than the recommended number of\n Fault Tolerance VMs running on it.'
obj = vim.client.factory.create('{urn:vim25}VmFaultToleranceTooManyVMsOnHost')
if ((len(args) + len(kwargs)) < 6):
raise IndexError(('Expected at least 7 arguments got: %d' % len(args)))
required = ['hostName', 'maxNumFtVms', 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage']
optional = []
for (name, arg) in zip((required + optional), args):
setattr(obj, name, arg)
for (name, value) in kwargs.items():
if (name in (required + optional)):
setattr(obj, name, value)
else:
raise InvalidArgumentError(('Invalid argument: %s. Expected one of %s' % (name, ', '.join((required + optional)))))
return obj
|
This fault is returned when a host has more than the recommended number of
Fault Tolerance VMs running on it.
|
pyvisdk/do/vm_fault_tolerance_too_many_v_ms_on_host.py
|
VmFaultToleranceTooManyVMsOnHost
|
Infinidat/pyvisdk
| 0
|
python
|
def VmFaultToleranceTooManyVMsOnHost(vim, *args, **kwargs):
'This fault is returned when a host has more than the recommended number of\n Fault Tolerance VMs running on it.'
obj = vim.client.factory.create('{urn:vim25}VmFaultToleranceTooManyVMsOnHost')
if ((len(args) + len(kwargs)) < 6):
raise IndexError(('Expected at least 7 arguments got: %d' % len(args)))
required = ['hostName', 'maxNumFtVms', 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage']
optional = []
for (name, arg) in zip((required + optional), args):
setattr(obj, name, arg)
for (name, value) in kwargs.items():
if (name in (required + optional)):
setattr(obj, name, value)
else:
raise InvalidArgumentError(('Invalid argument: %s. Expected one of %s' % (name, ', '.join((required + optional)))))
return obj
|
def VmFaultToleranceTooManyVMsOnHost(vim, *args, **kwargs):
'This fault is returned when a host has more than the recommended number of\n Fault Tolerance VMs running on it.'
obj = vim.client.factory.create('{urn:vim25}VmFaultToleranceTooManyVMsOnHost')
if ((len(args) + len(kwargs)) < 6):
raise IndexError(('Expected at least 7 arguments got: %d' % len(args)))
required = ['hostName', 'maxNumFtVms', 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage']
optional = []
for (name, arg) in zip((required + optional), args):
setattr(obj, name, arg)
for (name, value) in kwargs.items():
if (name in (required + optional)):
setattr(obj, name, value)
else:
raise InvalidArgumentError(('Invalid argument: %s. Expected one of %s' % (name, ', '.join((required + optional)))))
return obj<|docstring|>This fault is returned when a host has more than the recommended number of
Fault Tolerance VMs running on it.<|endoftext|>
|
05c54b9d0b8199a4c342f6ac29b9dfef64f03f822921c5686f5a9b7874b34260
|
def findPoisonedDuration(self, timeSeries, duration):
'\n :type timeSeries: List[int]\n :type duration: int\n :rtype: int\n '
totalTime = 0
n = len(timeSeries)
for i in range(n):
if ((i != (n - 1)) and (((timeSeries[i] + duration) - 1) >= timeSeries[(i + 1)])):
totalTime += (timeSeries[(i + 1)] - timeSeries[i])
else:
totalTime += duration
return totalTime
'\n Example\n \n timeSeries = [1,5,7,8] duration = 4\n \n poisonEffect remains from [t , t+ duration-1] (inclusive)\n \n poisonEffect Duration\n [1-4] poison effect = 4\n [5-8] poison effect 5 to 8 but after attack of 7 it will reset so it will be [5-7] poison effect = 2\n (5,6) because on 7 there will be next attack and this will be added in next part\n [7-10] poison effect 7 to 10 but after attack of 8 it will reset so it will be [7-8] poison effect =1\n (7) because on 8 there will be next attack and this will be added in next part\n [8-11] last element in timeSeries so it will have full duration poison effect = 4\n \n Hence Answer will be 11\n \n '
|
:type timeSeries: List[int]
:type duration: int
:rtype: int
|
greedy/teemoAttacking.py
|
findPoisonedDuration
|
kushvr7/High-On-DSA
| 76
|
python
|
def findPoisonedDuration(self, timeSeries, duration):
'\n :type timeSeries: List[int]\n :type duration: int\n :rtype: int\n '
totalTime = 0
n = len(timeSeries)
for i in range(n):
if ((i != (n - 1)) and (((timeSeries[i] + duration) - 1) >= timeSeries[(i + 1)])):
totalTime += (timeSeries[(i + 1)] - timeSeries[i])
else:
totalTime += duration
return totalTime
'\n Example\n \n timeSeries = [1,5,7,8] duration = 4\n \n poisonEffect remains from [t , t+ duration-1] (inclusive)\n \n poisonEffect Duration\n [1-4] poison effect = 4\n [5-8] poison effect 5 to 8 but after attack of 7 it will reset so it will be [5-7] poison effect = 2\n (5,6) because on 7 there will be next attack and this will be added in next part\n [7-10] poison effect 7 to 10 but after attack of 8 it will reset so it will be [7-8] poison effect =1\n (7) because on 8 there will be next attack and this will be added in next part\n [8-11] last element in timeSeries so it will have full duration poison effect = 4\n \n Hence Answer will be 11\n \n '
|
def findPoisonedDuration(self, timeSeries, duration):
'\n :type timeSeries: List[int]\n :type duration: int\n :rtype: int\n '
totalTime = 0
n = len(timeSeries)
for i in range(n):
if ((i != (n - 1)) and (((timeSeries[i] + duration) - 1) >= timeSeries[(i + 1)])):
totalTime += (timeSeries[(i + 1)] - timeSeries[i])
else:
totalTime += duration
return totalTime
'\n Example\n \n timeSeries = [1,5,7,8] duration = 4\n \n poisonEffect remains from [t , t+ duration-1] (inclusive)\n \n poisonEffect Duration\n [1-4] poison effect = 4\n [5-8] poison effect 5 to 8 but after attack of 7 it will reset so it will be [5-7] poison effect = 2\n (5,6) because on 7 there will be next attack and this will be added in next part\n [7-10] poison effect 7 to 10 but after attack of 8 it will reset so it will be [7-8] poison effect =1\n (7) because on 8 there will be next attack and this will be added in next part\n [8-11] last element in timeSeries so it will have full duration poison effect = 4\n \n Hence Answer will be 11\n \n '<|docstring|>:type timeSeries: List[int]
:type duration: int
:rtype: int<|endoftext|>
|
ad7c461588a4748df23e5c3d1f3423f11477ef1be90aed36ae1a1088ec86b556
|
def save_task(entry, db_object):
"\n In Tasks GUI - Passes self.detailed\n Prepares document for saving to database\n If _id == 'new', a new document will be created, with a generated UUID\n :param entry:\n :param db_object:\n :return:\n "
pri = entry['priority']
if (pri == 0):
pri = None
entry_id = entry['_id']
if (entry_id == 'new'):
"If _id is 'new', a new UUID will be generated, and a new file created"
new_task_template = {'_id': entry_id, 'productivity': True, 'type': 'task', 'due': entry.get('due', None), 'start': datetime.datetime.now().isoformat(), 'end': None, 'title': entry['title'], 'description': None, 'created': datetime.datetime.now().isoformat(), 'status': 'plan', 'project': None, 'context': None, 'priority': pri, 'tags': []}
docitem = {'doc': new_task_template}
else:
fp = entry['filepath']
docitem = db_object.fetchone(filepath=fp)
doc = docitem['doc']
doc['title'] = entry['title']
doc['priority'] = pri
db_object.insert(docitem)
return docitem
|
In Tasks GUI - Passes self.detailed
Prepares document for saving to database
If _id == 'new', a new document will be created, with a generated UUID
:param entry:
:param db_object:
:return:
|
tasks/supporters/tasks_common.py
|
save_task
|
lybekk/breefkase-tasks-cli
| 0
|
python
|
def save_task(entry, db_object):
"\n In Tasks GUI - Passes self.detailed\n Prepares document for saving to database\n If _id == 'new', a new document will be created, with a generated UUID\n :param entry:\n :param db_object:\n :return:\n "
pri = entry['priority']
if (pri == 0):
pri = None
entry_id = entry['_id']
if (entry_id == 'new'):
"If _id is 'new', a new UUID will be generated, and a new file created"
new_task_template = {'_id': entry_id, 'productivity': True, 'type': 'task', 'due': entry.get('due', None), 'start': datetime.datetime.now().isoformat(), 'end': None, 'title': entry['title'], 'description': None, 'created': datetime.datetime.now().isoformat(), 'status': 'plan', 'project': None, 'context': None, 'priority': pri, 'tags': []}
docitem = {'doc': new_task_template}
else:
fp = entry['filepath']
docitem = db_object.fetchone(filepath=fp)
doc = docitem['doc']
doc['title'] = entry['title']
doc['priority'] = pri
db_object.insert(docitem)
return docitem
|
def save_task(entry, db_object):
"\n In Tasks GUI - Passes self.detailed\n Prepares document for saving to database\n If _id == 'new', a new document will be created, with a generated UUID\n :param entry:\n :param db_object:\n :return:\n "
pri = entry['priority']
if (pri == 0):
pri = None
entry_id = entry['_id']
if (entry_id == 'new'):
"If _id is 'new', a new UUID will be generated, and a new file created"
new_task_template = {'_id': entry_id, 'productivity': True, 'type': 'task', 'due': entry.get('due', None), 'start': datetime.datetime.now().isoformat(), 'end': None, 'title': entry['title'], 'description': None, 'created': datetime.datetime.now().isoformat(), 'status': 'plan', 'project': None, 'context': None, 'priority': pri, 'tags': []}
docitem = {'doc': new_task_template}
else:
fp = entry['filepath']
docitem = db_object.fetchone(filepath=fp)
doc = docitem['doc']
doc['title'] = entry['title']
doc['priority'] = pri
db_object.insert(docitem)
return docitem<|docstring|>In Tasks GUI - Passes self.detailed
Prepares document for saving to database
If _id == 'new', a new document will be created, with a generated UUID
:param entry:
:param db_object:
:return:<|endoftext|>
|
5727be2b2f31e8b75fdc82e2a2221734c49079714a203bc3ab4009cf29505901
|
def getHotNews(self, offset=None):
'\n 爬取热点新闻\n :param offset:\n :return:\n '
json_data = None
url = 'https://www.csdn.net/api/articles'
params = {'type': 'more', 'category': 'python', 'shown_offset': (offset if offset else '')}
resp = http.pc().get(url, params=params)
valid = (resp.status_code == 200)
if valid:
json_data = resp.json()
return json_data
|
爬取热点新闻
:param offset:
:return:
|
spider_service/app/spider/csdn/csdn.py
|
getHotNews
|
seniortesting/python-spider
| 0
|
python
|
def getHotNews(self, offset=None):
'\n 爬取热点新闻\n :param offset:\n :return:\n '
json_data = None
url = 'https://www.csdn.net/api/articles'
params = {'type': 'more', 'category': 'python', 'shown_offset': (offset if offset else )}
resp = http.pc().get(url, params=params)
valid = (resp.status_code == 200)
if valid:
json_data = resp.json()
return json_data
|
def getHotNews(self, offset=None):
'\n 爬取热点新闻\n :param offset:\n :return:\n '
json_data = None
url = 'https://www.csdn.net/api/articles'
params = {'type': 'more', 'category': 'python', 'shown_offset': (offset if offset else )}
resp = http.pc().get(url, params=params)
valid = (resp.status_code == 200)
if valid:
json_data = resp.json()
return json_data<|docstring|>爬取热点新闻
:param offset:
:return:<|endoftext|>
|
bc1df6c2b99802a233c90427fc6f5c9a6900e0645689ea707708f98a60d16cd0
|
def test_stable_neural_de():
'Stable: basic functionality'
d = ToyDataset()
(X, yn) = d.generate(n_samples=512, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = Stable(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)
|
Stable: basic functionality
|
test/test_energy.py
|
test_stable_neural_de
|
mirams/torchdyn
| 1
|
python
|
def test_stable_neural_de():
d = ToyDataset()
(X, yn) = d.generate(n_samples=512, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = Stable(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)
|
def test_stable_neural_de():
d = ToyDataset()
(X, yn) = d.generate(n_samples=512, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = Stable(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)<|docstring|>Stable: basic functionality<|endoftext|>
|
8e842efd9c870ea9c27f40f63df1d806bfe6f452c476513754d42b738becb5b3
|
def test_hnn():
'HNN: basic functionality'
d = ToyDataset()
(X, yn) = d.generate(n_samples=32, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = HNN(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)
|
HNN: basic functionality
|
test/test_energy.py
|
test_hnn
|
mirams/torchdyn
| 1
|
python
|
def test_hnn():
d = ToyDataset()
(X, yn) = d.generate(n_samples=32, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = HNN(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)
|
def test_hnn():
d = ToyDataset()
(X, yn) = d.generate(n_samples=32, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = HNN(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)<|docstring|>HNN: basic functionality<|endoftext|>
|
cc9635aa288d992c50de89151328ea12d924d944fc9a93775f4d02a54fa979f2
|
def test_lnn():
'LNN: basic functionality'
d = ToyDataset()
(X, yn) = d.generate(n_samples=32, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = LNN(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)
|
LNN: basic functionality
|
test/test_energy.py
|
test_lnn
|
mirams/torchdyn
| 1
|
python
|
def test_lnn():
d = ToyDataset()
(X, yn) = d.generate(n_samples=32, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = LNN(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)
|
def test_lnn():
d = ToyDataset()
(X, yn) = d.generate(n_samples=32, dataset_type='moons', noise=0.4)
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
f = LNN(nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)))
model = NeuralDE(f).to(device)
learn = TestLearner(model, trainloader=trainloader)
trainer = pl.Trainer(min_epochs=10, max_epochs=30)
trainer.fit(learn)<|docstring|>LNN: basic functionality<|endoftext|>
|
dbbe4ddd72ceb4b8824d27a3274e72f4cf2370e08d7631f067980967c719c1ba
|
def negative_unregister_not_existing_image(self, image_name):
'Test to unregister already unregistrated image'
command_to_execute = 'image unregister'
self.check_negative_scenarios(base.TEMPEST_ERROR_MESSAGE, command_to_execute, image_name)
|
Test to unregister already unregistrated image
|
sahara_tempest_plugin/tests/cli/images.py
|
negative_unregister_not_existing_image
|
mail2nsrajesh/sahara-tests
| 18
|
python
|
def negative_unregister_not_existing_image(self, image_name):
command_to_execute = 'image unregister'
self.check_negative_scenarios(base.TEMPEST_ERROR_MESSAGE, command_to_execute, image_name)
|
def negative_unregister_not_existing_image(self, image_name):
command_to_execute = 'image unregister'
self.check_negative_scenarios(base.TEMPEST_ERROR_MESSAGE, command_to_execute, image_name)<|docstring|>Test to unregister already unregistrated image<|endoftext|>
|
4662f5729624d2c716147871bb08943492d3dd6ba37b9800cc60666598745628
|
def format_action(self, action):
'\n Maps continuous action into binary output\n -1 => open, 1 => closed\n\n Args:\n action (np.array): gripper-specific action\n\n Raises:\n AssertionError: [Invalid action dimension size]\n '
assert (len(action) == 1)
self.current_action = np.clip((self.current_action + (self.speed * np.sign(action))), (- 1.0), 1.0)
return self.current_action
|
Maps continuous action into binary output
-1 => open, 1 => closed
Args:
action (np.array): gripper-specific action
Raises:
AssertionError: [Invalid action dimension size]
|
robosuite/models/grippers/robotiq_85_gripper.py
|
format_action
|
wangguanzhi/robosuite
| 0
|
python
|
def format_action(self, action):
'\n Maps continuous action into binary output\n -1 => open, 1 => closed\n\n Args:\n action (np.array): gripper-specific action\n\n Raises:\n AssertionError: [Invalid action dimension size]\n '
assert (len(action) == 1)
self.current_action = np.clip((self.current_action + (self.speed * np.sign(action))), (- 1.0), 1.0)
return self.current_action
|
def format_action(self, action):
'\n Maps continuous action into binary output\n -1 => open, 1 => closed\n\n Args:\n action (np.array): gripper-specific action\n\n Raises:\n AssertionError: [Invalid action dimension size]\n '
assert (len(action) == 1)
self.current_action = np.clip((self.current_action + (self.speed * np.sign(action))), (- 1.0), 1.0)
return self.current_action<|docstring|>Maps continuous action into binary output
-1 => open, 1 => closed
Args:
action (np.array): gripper-specific action
Raises:
AssertionError: [Invalid action dimension size]<|endoftext|>
|
fa36b6b5c728e6b4ad85aa9f5bef7933ee148096f1e688dc76a0f05041fa774a
|
def remove_markdown(text):
'\n Remove markdown syntax from text.\n\n Args:\n text: String of raw markdown text\n\n Returns:\n A processed string without any markdown syntax\n '
html = markdown(text)
return ''.join(BeautifulSoup(html, 'html.parser').findAll(text=True))
|
Remove markdown syntax from text.
Args:
text: String of raw markdown text
Returns:
A processed string without any markdown syntax
|
streaming/reddit.py
|
remove_markdown
|
seanhelm/streddit
| 0
|
python
|
def remove_markdown(text):
'\n Remove markdown syntax from text.\n\n Args:\n text: String of raw markdown text\n\n Returns:\n A processed string without any markdown syntax\n '
html = markdown(text)
return .join(BeautifulSoup(html, 'html.parser').findAll(text=True))
|
def remove_markdown(text):
'\n Remove markdown syntax from text.\n\n Args:\n text: String of raw markdown text\n\n Returns:\n A processed string without any markdown syntax\n '
html = markdown(text)
return .join(BeautifulSoup(html, 'html.parser').findAll(text=True))<|docstring|>Remove markdown syntax from text.
Args:
text: String of raw markdown text
Returns:
A processed string without any markdown syntax<|endoftext|>
|
f0fcae19a401ed2570fcdef75878c53f00314b4115cf80dd6153299631de0241
|
def process_comment(comment, collection, max_polarity):
'\n Process the entire comment by removing markdown syntax and performing sentiment\n analysis on the comment body.\n\n Args:\n comment: Entire comment information\n '
body_plain = remove_markdown(comment.body)
blob = TextBlob(body_plain)
if (blob.sentiment[0] <= max_polarity):
other_keys = ['permalink', 'subreddit', 'link_title']
comment_dict = {key: comment.__dict__[key] for key in other_keys}
comment_dict.update(author=comment.author.name, subreddit=comment.author.name, created=datetime.utcfromtimestamp(comment.created_utc), body=body_plain, polarity=blob.sentiment[0])
id = collection.insert_one(comment_dict).inserted_id
print(('\t%s: %s' % (id, comment_dict)))
|
Process the entire comment by removing markdown syntax and performing sentiment
analysis on the comment body.
Args:
comment: Entire comment information
|
streaming/reddit.py
|
process_comment
|
seanhelm/streddit
| 0
|
python
|
def process_comment(comment, collection, max_polarity):
'\n Process the entire comment by removing markdown syntax and performing sentiment\n analysis on the comment body.\n\n Args:\n comment: Entire comment information\n '
body_plain = remove_markdown(comment.body)
blob = TextBlob(body_plain)
if (blob.sentiment[0] <= max_polarity):
other_keys = ['permalink', 'subreddit', 'link_title']
comment_dict = {key: comment.__dict__[key] for key in other_keys}
comment_dict.update(author=comment.author.name, subreddit=comment.author.name, created=datetime.utcfromtimestamp(comment.created_utc), body=body_plain, polarity=blob.sentiment[0])
id = collection.insert_one(comment_dict).inserted_id
print(('\t%s: %s' % (id, comment_dict)))
|
def process_comment(comment, collection, max_polarity):
'\n Process the entire comment by removing markdown syntax and performing sentiment\n analysis on the comment body.\n\n Args:\n comment: Entire comment information\n '
body_plain = remove_markdown(comment.body)
blob = TextBlob(body_plain)
if (blob.sentiment[0] <= max_polarity):
other_keys = ['permalink', 'subreddit', 'link_title']
comment_dict = {key: comment.__dict__[key] for key in other_keys}
comment_dict.update(author=comment.author.name, subreddit=comment.author.name, created=datetime.utcfromtimestamp(comment.created_utc), body=body_plain, polarity=blob.sentiment[0])
id = collection.insert_one(comment_dict).inserted_id
print(('\t%s: %s' % (id, comment_dict)))<|docstring|>Process the entire comment by removing markdown syntax and performing sentiment
analysis on the comment body.
Args:
comment: Entire comment information<|endoftext|>
|
cb513be49a06a3d6d23f72bcb823f69e6e6f6345acf00dd865e6da5940a54783
|
async def start(self):
'\n Start the menu\n '
components = self._get_buttons()
self._message = (await self._ctx.send(embed=self._cur, components=[components]))
self.__task = self._ctx.bot.loop.create_task(self._loop())
self._timer = Timer(self._done, self._timeout)
(await self._timer.start())
|
Start the menu
|
plugins/system/settings/menus.py
|
start
|
rybot666/LiteBot
| 22
|
python
|
async def start(self):
'\n \n '
components = self._get_buttons()
self._message = (await self._ctx.send(embed=self._cur, components=[components]))
self.__task = self._ctx.bot.loop.create_task(self._loop())
self._timer = Timer(self._done, self._timeout)
(await self._timer.start())
|
async def start(self):
'\n \n '
components = self._get_buttons()
self._message = (await self._ctx.send(embed=self._cur, components=[components]))
self.__task = self._ctx.bot.loop.create_task(self._loop())
self._timer = Timer(self._done, self._timeout)
(await self._timer.start())<|docstring|>Start the menu<|endoftext|>
|
2a8dc2594e2a622c162ae16122facfab136927b1bbe753897228065057b1ac69
|
async def start(self):
'\n Start the menu\n '
(await self._message.edit(embed=self._embed, components=[self._components]))
while True:
def check(m):
return ((m.channel == self._ctx.channel) and (m.author == self._ctx.author))
interaction = (await self._ctx.bot.wait_for('button_click', check=check))
(await self._timer.reset())
interaction_type = interaction.raw_data['d']['data']['custom_id']
if (interaction_type == SettingsConfigMenu.InteractionTypes.BACK):
(await interaction.respond(type=InteractionType.DeferredUpdateMessage))
break
with self._timer:
(await getattr(self, f'_{interaction_type}', (lambda i: None))(interaction))
|
Start the menu
|
plugins/system/settings/menus.py
|
start
|
rybot666/LiteBot
| 22
|
python
|
async def start(self):
'\n \n '
(await self._message.edit(embed=self._embed, components=[self._components]))
while True:
def check(m):
return ((m.channel == self._ctx.channel) and (m.author == self._ctx.author))
interaction = (await self._ctx.bot.wait_for('button_click', check=check))
(await self._timer.reset())
interaction_type = interaction.raw_data['d']['data']['custom_id']
if (interaction_type == SettingsConfigMenu.InteractionTypes.BACK):
(await interaction.respond(type=InteractionType.DeferredUpdateMessage))
break
with self._timer:
(await getattr(self, f'_{interaction_type}', (lambda i: None))(interaction))
|
async def start(self):
'\n \n '
(await self._message.edit(embed=self._embed, components=[self._components]))
while True:
def check(m):
return ((m.channel == self._ctx.channel) and (m.author == self._ctx.author))
interaction = (await self._ctx.bot.wait_for('button_click', check=check))
(await self._timer.reset())
interaction_type = interaction.raw_data['d']['data']['custom_id']
if (interaction_type == SettingsConfigMenu.InteractionTypes.BACK):
(await interaction.respond(type=InteractionType.DeferredUpdateMessage))
break
with self._timer:
(await getattr(self, f'_{interaction_type}', (lambda i: None))(interaction))<|docstring|>Start the menu<|endoftext|>
|
d7d31c495822f72dc9631d8c7b0f3e61854378e41dd7f93015c2519bee6ce8cb
|
async def start(self):
'\n Start the menu\n '
(await self._message.edit(embed=self._embed, components=[self._components]))
while True:
def check(m):
return ((m.channel == self._ctx.channel) and (m.author == self._ctx.author) and (m.id == self._message.id))
interaction = (await self._ctx.bot.wait_for('button_click', check=check))
(await self._timer.reset())
interaction_type = interaction.raw_data['d']['data']['custom_id']
if (interaction_type == PluginsConfigMenu.InteractionTypes.BACK):
(await interaction.respond(type=InteractionType.DeferredUpdateMessage))
break
with self._timer:
(await getattr(self, f'_{interaction_type}', (lambda i: None))(interaction))
|
Start the menu
|
plugins/system/settings/menus.py
|
start
|
rybot666/LiteBot
| 22
|
python
|
async def start(self):
'\n \n '
(await self._message.edit(embed=self._embed, components=[self._components]))
while True:
def check(m):
return ((m.channel == self._ctx.channel) and (m.author == self._ctx.author) and (m.id == self._message.id))
interaction = (await self._ctx.bot.wait_for('button_click', check=check))
(await self._timer.reset())
interaction_type = interaction.raw_data['d']['data']['custom_id']
if (interaction_type == PluginsConfigMenu.InteractionTypes.BACK):
(await interaction.respond(type=InteractionType.DeferredUpdateMessage))
break
with self._timer:
(await getattr(self, f'_{interaction_type}', (lambda i: None))(interaction))
|
async def start(self):
'\n \n '
(await self._message.edit(embed=self._embed, components=[self._components]))
while True:
def check(m):
return ((m.channel == self._ctx.channel) and (m.author == self._ctx.author) and (m.id == self._message.id))
interaction = (await self._ctx.bot.wait_for('button_click', check=check))
(await self._timer.reset())
interaction_type = interaction.raw_data['d']['data']['custom_id']
if (interaction_type == PluginsConfigMenu.InteractionTypes.BACK):
(await interaction.respond(type=InteractionType.DeferredUpdateMessage))
break
with self._timer:
(await getattr(self, f'_{interaction_type}', (lambda i: None))(interaction))<|docstring|>Start the menu<|endoftext|>
|
3237403786aa5fddbab93657a5594a879d19e140b3c48af69292a1284aa7a118
|
def __init__(self, position, tpl_shape):
'\n\n :param position: tuple with x and y coordinates\n :param tpl_shape: tuple with height and width of the shape\n '
self.x = position[0]
self.y = position[1]
self.tpl_width = tpl_shape[1]
self.tpl_height = tpl_shape[0]
|
:param position: tuple with x and y coordinates
:param tpl_shape: tuple with height and width of the shape
|
gui_automation/spot.py
|
__init__
|
marcosboggia/gui_automation
| 10
|
python
|
def __init__(self, position, tpl_shape):
'\n\n :param position: tuple with x and y coordinates\n :param tpl_shape: tuple with height and width of the shape\n '
self.x = position[0]
self.y = position[1]
self.tpl_width = tpl_shape[1]
self.tpl_height = tpl_shape[0]
|
def __init__(self, position, tpl_shape):
'\n\n :param position: tuple with x and y coordinates\n :param tpl_shape: tuple with height and width of the shape\n '
self.x = position[0]
self.y = position[1]
self.tpl_width = tpl_shape[1]
self.tpl_height = tpl_shape[0]<|docstring|>:param position: tuple with x and y coordinates
:param tpl_shape: tuple with height and width of the shape<|endoftext|>
|
ae50e6216767c17917e3e57b775a80e1bf32fa3ee84f4f8c2f9686625a8aedfb
|
def custom_position(self, x_multiplier, x_modifier, y_multiplier, y_modifier):
'\n This method helps calculate any coordinate within the image detected.\n Ej: x, y = custom_position(3, 8, 1, 2)\n 3/8 of the width\n __o_____\n | |\n | |\n | x o 1/2 of the height\n | |\n |________|\n\n :param x_multiplier: how many parts of the divided width to take\n :param x_modifier: in how many parts the width is going to be divided\n :param y_multiplier: same as width but with height\n :param y_modifier: same as width but with height\n :return:\n new_x, new_y: the calculated coordinates\n '
new_x = int((self.x + ((self.tpl_width / x_modifier) * x_multiplier)))
new_y = int((self.y + ((self.tpl_height / y_modifier) * y_multiplier)))
return (new_x, new_y)
|
This method helps calculate any coordinate within the image detected.
Ej: x, y = custom_position(3, 8, 1, 2)
3/8 of the width
__o_____
| |
| |
| x o 1/2 of the height
| |
|________|
:param x_multiplier: how many parts of the divided width to take
:param x_modifier: in how many parts the width is going to be divided
:param y_multiplier: same as width but with height
:param y_modifier: same as width but with height
:return:
new_x, new_y: the calculated coordinates
|
gui_automation/spot.py
|
custom_position
|
marcosboggia/gui_automation
| 10
|
python
|
def custom_position(self, x_multiplier, x_modifier, y_multiplier, y_modifier):
'\n This method helps calculate any coordinate within the image detected.\n Ej: x, y = custom_position(3, 8, 1, 2)\n 3/8 of the width\n __o_____\n | |\n | |\n | x o 1/2 of the height\n | |\n |________|\n\n :param x_multiplier: how many parts of the divided width to take\n :param x_modifier: in how many parts the width is going to be divided\n :param y_multiplier: same as width but with height\n :param y_modifier: same as width but with height\n :return:\n new_x, new_y: the calculated coordinates\n '
new_x = int((self.x + ((self.tpl_width / x_modifier) * x_multiplier)))
new_y = int((self.y + ((self.tpl_height / y_modifier) * y_multiplier)))
return (new_x, new_y)
|
def custom_position(self, x_multiplier, x_modifier, y_multiplier, y_modifier):
'\n This method helps calculate any coordinate within the image detected.\n Ej: x, y = custom_position(3, 8, 1, 2)\n 3/8 of the width\n __o_____\n | |\n | |\n | x o 1/2 of the height\n | |\n |________|\n\n :param x_multiplier: how many parts of the divided width to take\n :param x_modifier: in how many parts the width is going to be divided\n :param y_multiplier: same as width but with height\n :param y_modifier: same as width but with height\n :return:\n new_x, new_y: the calculated coordinates\n '
new_x = int((self.x + ((self.tpl_width / x_modifier) * x_multiplier)))
new_y = int((self.y + ((self.tpl_height / y_modifier) * y_multiplier)))
return (new_x, new_y)<|docstring|>This method helps calculate any coordinate within the image detected.
Ej: x, y = custom_position(3, 8, 1, 2)
3/8 of the width
__o_____
| |
| |
| x o 1/2 of the height
| |
|________|
:param x_multiplier: how many parts of the divided width to take
:param x_modifier: in how many parts the width is going to be divided
:param y_multiplier: same as width but with height
:param y_modifier: same as width but with height
:return:
new_x, new_y: the calculated coordinates<|endoftext|>
|
6a4bd02531d977a4231afac88180edd7fda311d89a41ff7915591573e951bc0a
|
def revert_scaled_position_error(self, scale):
'\n If image has been resized this function must be used to fix the displacement of the position to the original\n image.\n :param scale: the scale used to resize the image\n :return: No return\n '
self.x = int((self.x / scale))
self.y = int((self.y / scale))
|
If image has been resized this function must be used to fix the displacement of the position to the original
image.
:param scale: the scale used to resize the image
:return: No return
|
gui_automation/spot.py
|
revert_scaled_position_error
|
marcosboggia/gui_automation
| 10
|
python
|
def revert_scaled_position_error(self, scale):
'\n If image has been resized this function must be used to fix the displacement of the position to the original\n image.\n :param scale: the scale used to resize the image\n :return: No return\n '
self.x = int((self.x / scale))
self.y = int((self.y / scale))
|
def revert_scaled_position_error(self, scale):
'\n If image has been resized this function must be used to fix the displacement of the position to the original\n image.\n :param scale: the scale used to resize the image\n :return: No return\n '
self.x = int((self.x / scale))
self.y = int((self.y / scale))<|docstring|>If image has been resized this function must be used to fix the displacement of the position to the original
image.
:param scale: the scale used to resize the image
:return: No return<|endoftext|>
|
b379b0e30880590801adf7891bba0449d8464699aca6a32acbe863313e8312a4
|
@profilable
def schema_compatible(value, schema_or_field, context=None):
'The schema_compatible function converts any value to guillotina.schema\n compatible data when possible, raising a TypeError for unsupported values.\n This is done by using the ISchemaCompatible converters.\n '
if (value is None):
return value
try:
return get_adapter(schema_or_field, IJSONToValue, args=[value, context])
except ComponentLookupError:
raise ValueDeserializationError(schema_or_field, value, 'Deserializer not found for field')
|
The schema_compatible function converts any value to guillotina.schema
compatible data when possible, raising a TypeError for unsupported values.
This is done by using the ISchemaCompatible converters.
|
guillotina/json/deserialize_value.py
|
schema_compatible
|
vinissimus/guillotina
| 173
|
python
|
@profilable
def schema_compatible(value, schema_or_field, context=None):
'The schema_compatible function converts any value to guillotina.schema\n compatible data when possible, raising a TypeError for unsupported values.\n This is done by using the ISchemaCompatible converters.\n '
if (value is None):
return value
try:
return get_adapter(schema_or_field, IJSONToValue, args=[value, context])
except ComponentLookupError:
raise ValueDeserializationError(schema_or_field, value, 'Deserializer not found for field')
|
@profilable
def schema_compatible(value, schema_or_field, context=None):
'The schema_compatible function converts any value to guillotina.schema\n compatible data when possible, raising a TypeError for unsupported values.\n This is done by using the ISchemaCompatible converters.\n '
if (value is None):
return value
try:
return get_adapter(schema_or_field, IJSONToValue, args=[value, context])
except ComponentLookupError:
raise ValueDeserializationError(schema_or_field, value, 'Deserializer not found for field')<|docstring|>The schema_compatible function converts any value to guillotina.schema
compatible data when possible, raising a TypeError for unsupported values.
This is done by using the ISchemaCompatible converters.<|endoftext|>
|
a0f5cc66b981a73be501e60bf5ab28e69e3b0f9a7d08d16667a7860be56eca78
|
@staticmethod
def Args(parser):
'Register flags for this command.\n\n Args:\n parser: argparse.ArgumentParser to register arguments with.\n '
parser.add_argument('job_name', metavar='JOB_NAME', help='Unique name to assign to the job.')
parser.add_argument('--template-file-gcs-location', help="Google Cloud Storage location of the flex template to run. (Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"), required=True)
parser.add_argument('--region', metavar='REGION_ID', help=("Region ID of the job's regional endpoint. " + dataflow_util.DEFAULT_REGION_MESSAGE))
parser.add_argument('--staging-location', help="Default Google Cloud Storage location to stage local files.(Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"))
parser.add_argument('--temp-location', help="Default Google Cloud Storage location to stage temporary files. If not set, defaults to the value for --staging-location.(Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"))
parser.add_argument('--service-account-email', type=arg_parsers.RegexpValidator('.*@.*\\..*', 'must provide a valid email address'), help='Service account to run the workers as.')
parser.add_argument('--max-workers', type=int, help='Maximum number of workers to run.')
parser.add_argument('--disable-public-ips', action=actions.StoreBooleanProperty(properties.VALUES.dataflow.disable_public_ips), help='Cloud Dataflow workers must not use public IP addresses.')
parser.add_argument('--num-workers', type=int, help='Initial number of workers to use.')
parser.add_argument('--worker-machine-type', help='Type of machine to use for workers. Defaults to server-specified.')
parser.add_argument('--subnetwork', help='Compute Engine subnetwork for launching instances to run your pipeline.')
parser.add_argument('--network', help='Compute Engine network for launching instances to run your pipeline.')
parser.add_argument('--dataflow-kms-key', help='Cloud KMS key to protect the job resources.')
region_group = parser.add_mutually_exclusive_group()
region_group.add_argument('--worker-region', help='Region to run the workers in.')
region_group.add_argument('--worker-zone', help='Zone to run the workers in.')
parser.add_argument('--enable-streaming-engine', action=actions.StoreBooleanProperty(properties.VALUES.dataflow.enable_streaming_engine), help='Enabling Streaming Engine for the streaming job.')
parser.add_argument('--additional-experiments', metavar='ADDITIONAL_EXPERIMENTS', type=arg_parsers.ArgList(), action=arg_parsers.UpdateAction, help='Additional experiments to pass to the job.')
parser.add_argument('--additional-user-labels', metavar='ADDITIONAL_USER_LABELS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Additional user labels to pass to the job.')
parser.add_argument('--parameters', metavar='PARAMETERS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Parameters to pass to the job.')
streaming_update_args = parser.add_argument_group()
streaming_update_args.add_argument('--transform-name-mappings', metavar='TRANSFORM_NAME_MAPPINGS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Transform name mappings for the streaming update job.')
streaming_update_args.add_argument('--update', help='Set this to true for streaming update jobs.', action=arg_parsers.StoreTrueFalseAction, required=True)
parser.add_argument('--flexrs-goal', help='FlexRS goal for the flex template job.', choices=['COST_OPTIMIZED', 'SPEED_OPTIMIZED'])
|
Register flags for this command.
Args:
parser: argparse.ArgumentParser to register arguments with.
|
lib/surface/dataflow/flex_template/run.py
|
Args
|
google-cloud-sdk-unofficial/google-cloud-sdk
| 2
|
python
|
@staticmethod
def Args(parser):
'Register flags for this command.\n\n Args:\n parser: argparse.ArgumentParser to register arguments with.\n '
parser.add_argument('job_name', metavar='JOB_NAME', help='Unique name to assign to the job.')
parser.add_argument('--template-file-gcs-location', help="Google Cloud Storage location of the flex template to run. (Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"), required=True)
parser.add_argument('--region', metavar='REGION_ID', help=("Region ID of the job's regional endpoint. " + dataflow_util.DEFAULT_REGION_MESSAGE))
parser.add_argument('--staging-location', help="Default Google Cloud Storage location to stage local files.(Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"))
parser.add_argument('--temp-location', help="Default Google Cloud Storage location to stage temporary files. If not set, defaults to the value for --staging-location.(Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"))
parser.add_argument('--service-account-email', type=arg_parsers.RegexpValidator('.*@.*\\..*', 'must provide a valid email address'), help='Service account to run the workers as.')
parser.add_argument('--max-workers', type=int, help='Maximum number of workers to run.')
parser.add_argument('--disable-public-ips', action=actions.StoreBooleanProperty(properties.VALUES.dataflow.disable_public_ips), help='Cloud Dataflow workers must not use public IP addresses.')
parser.add_argument('--num-workers', type=int, help='Initial number of workers to use.')
parser.add_argument('--worker-machine-type', help='Type of machine to use for workers. Defaults to server-specified.')
parser.add_argument('--subnetwork', help='Compute Engine subnetwork for launching instances to run your pipeline.')
parser.add_argument('--network', help='Compute Engine network for launching instances to run your pipeline.')
parser.add_argument('--dataflow-kms-key', help='Cloud KMS key to protect the job resources.')
region_group = parser.add_mutually_exclusive_group()
region_group.add_argument('--worker-region', help='Region to run the workers in.')
region_group.add_argument('--worker-zone', help='Zone to run the workers in.')
parser.add_argument('--enable-streaming-engine', action=actions.StoreBooleanProperty(properties.VALUES.dataflow.enable_streaming_engine), help='Enabling Streaming Engine for the streaming job.')
parser.add_argument('--additional-experiments', metavar='ADDITIONAL_EXPERIMENTS', type=arg_parsers.ArgList(), action=arg_parsers.UpdateAction, help='Additional experiments to pass to the job.')
parser.add_argument('--additional-user-labels', metavar='ADDITIONAL_USER_LABELS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Additional user labels to pass to the job.')
parser.add_argument('--parameters', metavar='PARAMETERS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Parameters to pass to the job.')
streaming_update_args = parser.add_argument_group()
streaming_update_args.add_argument('--transform-name-mappings', metavar='TRANSFORM_NAME_MAPPINGS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Transform name mappings for the streaming update job.')
streaming_update_args.add_argument('--update', help='Set this to true for streaming update jobs.', action=arg_parsers.StoreTrueFalseAction, required=True)
parser.add_argument('--flexrs-goal', help='FlexRS goal for the flex template job.', choices=['COST_OPTIMIZED', 'SPEED_OPTIMIZED'])
|
@staticmethod
def Args(parser):
'Register flags for this command.\n\n Args:\n parser: argparse.ArgumentParser to register arguments with.\n '
parser.add_argument('job_name', metavar='JOB_NAME', help='Unique name to assign to the job.')
parser.add_argument('--template-file-gcs-location', help="Google Cloud Storage location of the flex template to run. (Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"), required=True)
parser.add_argument('--region', metavar='REGION_ID', help=("Region ID of the job's regional endpoint. " + dataflow_util.DEFAULT_REGION_MESSAGE))
parser.add_argument('--staging-location', help="Default Google Cloud Storage location to stage local files.(Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"))
parser.add_argument('--temp-location', help="Default Google Cloud Storage location to stage temporary files. If not set, defaults to the value for --staging-location.(Must be a URL beginning with 'gs://'.)", type=arg_parsers.RegexpValidator('^gs://.*', "Must begin with 'gs://'"))
parser.add_argument('--service-account-email', type=arg_parsers.RegexpValidator('.*@.*\\..*', 'must provide a valid email address'), help='Service account to run the workers as.')
parser.add_argument('--max-workers', type=int, help='Maximum number of workers to run.')
parser.add_argument('--disable-public-ips', action=actions.StoreBooleanProperty(properties.VALUES.dataflow.disable_public_ips), help='Cloud Dataflow workers must not use public IP addresses.')
parser.add_argument('--num-workers', type=int, help='Initial number of workers to use.')
parser.add_argument('--worker-machine-type', help='Type of machine to use for workers. Defaults to server-specified.')
parser.add_argument('--subnetwork', help='Compute Engine subnetwork for launching instances to run your pipeline.')
parser.add_argument('--network', help='Compute Engine network for launching instances to run your pipeline.')
parser.add_argument('--dataflow-kms-key', help='Cloud KMS key to protect the job resources.')
region_group = parser.add_mutually_exclusive_group()
region_group.add_argument('--worker-region', help='Region to run the workers in.')
region_group.add_argument('--worker-zone', help='Zone to run the workers in.')
parser.add_argument('--enable-streaming-engine', action=actions.StoreBooleanProperty(properties.VALUES.dataflow.enable_streaming_engine), help='Enabling Streaming Engine for the streaming job.')
parser.add_argument('--additional-experiments', metavar='ADDITIONAL_EXPERIMENTS', type=arg_parsers.ArgList(), action=arg_parsers.UpdateAction, help='Additional experiments to pass to the job.')
parser.add_argument('--additional-user-labels', metavar='ADDITIONAL_USER_LABELS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Additional user labels to pass to the job.')
parser.add_argument('--parameters', metavar='PARAMETERS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Parameters to pass to the job.')
streaming_update_args = parser.add_argument_group()
streaming_update_args.add_argument('--transform-name-mappings', metavar='TRANSFORM_NAME_MAPPINGS', type=arg_parsers.ArgDict(), action=arg_parsers.UpdateAction, help='Transform name mappings for the streaming update job.')
streaming_update_args.add_argument('--update', help='Set this to true for streaming update jobs.', action=arg_parsers.StoreTrueFalseAction, required=True)
parser.add_argument('--flexrs-goal', help='FlexRS goal for the flex template job.', choices=['COST_OPTIMIZED', 'SPEED_OPTIMIZED'])<|docstring|>Register flags for this command.
Args:
parser: argparse.ArgumentParser to register arguments with.<|endoftext|>
|
2c8f0ef308f7c50bdf4ed390010afba920a4ec0b680042f3c2082a3e0945f288
|
def Run(self, args):
'Runs the command.\n\n Args:\n args: The arguments that were provided to this command invocation.\n\n Returns:\n A Job message.\n '
arguments = apis.TemplateArguments(project_id=properties.VALUES.core.project.Get(required=True), region_id=dataflow_util.GetRegion(args), job_name=args.job_name, gcs_location=args.template_file_gcs_location, max_workers=args.max_workers, num_workers=args.num_workers, network=args.network, subnetwork=args.subnetwork, worker_machine_type=args.worker_machine_type, kms_key_name=args.dataflow_kms_key, staging_location=args.staging_location, temp_location=args.temp_location, disable_public_ips=properties.VALUES.dataflow.disable_public_ips.GetBool(), service_account_email=args.service_account_email, worker_region=args.worker_region, worker_zone=args.worker_zone, enable_streaming_engine=properties.VALUES.dataflow.enable_streaming_engine.GetBool(), additional_experiments=args.additional_experiments, additional_user_labels=args.additional_user_labels, streaming_update=args.update, transform_name_mappings=args.transform_name_mappings, flexrs_goal=args.flexrs_goal, parameters=args.parameters)
return apis.Templates.CreateJobFromFlexTemplate(arguments)
|
Runs the command.
Args:
args: The arguments that were provided to this command invocation.
Returns:
A Job message.
|
lib/surface/dataflow/flex_template/run.py
|
Run
|
google-cloud-sdk-unofficial/google-cloud-sdk
| 2
|
python
|
def Run(self, args):
'Runs the command.\n\n Args:\n args: The arguments that were provided to this command invocation.\n\n Returns:\n A Job message.\n '
arguments = apis.TemplateArguments(project_id=properties.VALUES.core.project.Get(required=True), region_id=dataflow_util.GetRegion(args), job_name=args.job_name, gcs_location=args.template_file_gcs_location, max_workers=args.max_workers, num_workers=args.num_workers, network=args.network, subnetwork=args.subnetwork, worker_machine_type=args.worker_machine_type, kms_key_name=args.dataflow_kms_key, staging_location=args.staging_location, temp_location=args.temp_location, disable_public_ips=properties.VALUES.dataflow.disable_public_ips.GetBool(), service_account_email=args.service_account_email, worker_region=args.worker_region, worker_zone=args.worker_zone, enable_streaming_engine=properties.VALUES.dataflow.enable_streaming_engine.GetBool(), additional_experiments=args.additional_experiments, additional_user_labels=args.additional_user_labels, streaming_update=args.update, transform_name_mappings=args.transform_name_mappings, flexrs_goal=args.flexrs_goal, parameters=args.parameters)
return apis.Templates.CreateJobFromFlexTemplate(arguments)
|
def Run(self, args):
'Runs the command.\n\n Args:\n args: The arguments that were provided to this command invocation.\n\n Returns:\n A Job message.\n '
arguments = apis.TemplateArguments(project_id=properties.VALUES.core.project.Get(required=True), region_id=dataflow_util.GetRegion(args), job_name=args.job_name, gcs_location=args.template_file_gcs_location, max_workers=args.max_workers, num_workers=args.num_workers, network=args.network, subnetwork=args.subnetwork, worker_machine_type=args.worker_machine_type, kms_key_name=args.dataflow_kms_key, staging_location=args.staging_location, temp_location=args.temp_location, disable_public_ips=properties.VALUES.dataflow.disable_public_ips.GetBool(), service_account_email=args.service_account_email, worker_region=args.worker_region, worker_zone=args.worker_zone, enable_streaming_engine=properties.VALUES.dataflow.enable_streaming_engine.GetBool(), additional_experiments=args.additional_experiments, additional_user_labels=args.additional_user_labels, streaming_update=args.update, transform_name_mappings=args.transform_name_mappings, flexrs_goal=args.flexrs_goal, parameters=args.parameters)
return apis.Templates.CreateJobFromFlexTemplate(arguments)<|docstring|>Runs the command.
Args:
args: The arguments that were provided to this command invocation.
Returns:
A Job message.<|endoftext|>
|
cd69df22c87b0c42291a84ffa2e637fc059f96ef877dd43135cfb9ae0bfec3b5
|
def isPalindrome(self, head):
'\n Time: N\n Space: 1\n :type head: ListNode\n :rtype: bool\n '
slow = fast = head
rev = None
while (fast and fast.next):
fast = fast.next.next
(rev, rev.next, slow) = (slow, rev, slow.next)
if fast:
slow = slow.next
while (rev and (rev.val == slow.val)):
slow = slow.next
rev = rev.next
return (not rev)
|
Time: N
Space: 1
:type head: ListNode
:rtype: bool
|
linkedlist/palindrome_ll.py
|
isPalindrome
|
kandarpck/leetcode
| 0
|
python
|
def isPalindrome(self, head):
'\n Time: N\n Space: 1\n :type head: ListNode\n :rtype: bool\n '
slow = fast = head
rev = None
while (fast and fast.next):
fast = fast.next.next
(rev, rev.next, slow) = (slow, rev, slow.next)
if fast:
slow = slow.next
while (rev and (rev.val == slow.val)):
slow = slow.next
rev = rev.next
return (not rev)
|
def isPalindrome(self, head):
'\n Time: N\n Space: 1\n :type head: ListNode\n :rtype: bool\n '
slow = fast = head
rev = None
while (fast and fast.next):
fast = fast.next.next
(rev, rev.next, slow) = (slow, rev, slow.next)
if fast:
slow = slow.next
while (rev and (rev.val == slow.val)):
slow = slow.next
rev = rev.next
return (not rev)<|docstring|>Time: N
Space: 1
:type head: ListNode
:rtype: bool<|endoftext|>
|
2a3ef07858480540cb9217e076ba554a2752fc4eff1a2137d13ea68abe80bf53
|
def populateSheep(self, n_sheep):
' populate the world with n_sheep sheep '
for _ in xrange(n_sheep):
self.addSheep()
|
populate the world with n_sheep sheep
|
sheepherding/world/world.py
|
populateSheep
|
schmit/sheepherding
| 2
|
python
|
def populateSheep(self, n_sheep):
' '
for _ in xrange(n_sheep):
self.addSheep()
|
def populateSheep(self, n_sheep):
' '
for _ in xrange(n_sheep):
self.addSheep()<|docstring|>populate the world with n_sheep sheep<|endoftext|>
|
6b052db291c703b4dbf198b24037d181aaab08157f52225b67faf8e08900ef77
|
def addDog(self):
' add a specific dog to the world at random location '
self.dogs.append(Dog(self))
|
add a specific dog to the world at random location
|
sheepherding/world/world.py
|
addDog
|
schmit/sheepherding
| 2
|
python
|
def addDog(self):
' '
self.dogs.append(Dog(self))
|
def addDog(self):
' '
self.dogs.append(Dog(self))<|docstring|>add a specific dog to the world at random location<|endoftext|>
|
d1fcaab91fd637adb17276565def1646abb72329cf68e94089a699ae532ba3f8
|
def reset(self):
' random location for animals '
for sheep in self.sheeps:
sheep.reset()
for dog in self.dogs:
dog.reset()
|
random location for animals
|
sheepherding/world/world.py
|
reset
|
schmit/sheepherding
| 2
|
python
|
def reset(self):
' '
for sheep in self.sheeps:
sheep.reset()
for dog in self.dogs:
dog.reset()
|
def reset(self):
' '
for sheep in self.sheeps:
sheep.reset()
for dog in self.dogs:
dog.reset()<|docstring|>random location for animals<|endoftext|>
|
f2bdd13a0238ca7b4df0d658ad3de7e6febaa023627950ed0eb1dabda705c16b
|
def run(self, seconds):
' run world for a number of seconds '
iteration = 0
reward = 0
while (iteration < (30 * seconds)):
self.update()
if self.ai.done:
break
iteration += 1
self.reset()
|
run world for a number of seconds
|
sheepherding/world/world.py
|
run
|
schmit/sheepherding
| 2
|
python
|
def run(self, seconds):
' '
iteration = 0
reward = 0
while (iteration < (30 * seconds)):
self.update()
if self.ai.done:
break
iteration += 1
self.reset()
|
def run(self, seconds):
' '
iteration = 0
reward = 0
while (iteration < (30 * seconds)):
self.update()
if self.ai.done:
break
iteration += 1
self.reset()<|docstring|>run world for a number of seconds<|endoftext|>
|
393d6982cdfc98e75fe1e3d0f76995dbafd2ca1c5bf6984199cd61dd21ad50a8
|
def __init__(self):
'初始化类'
self.v_debug = 0
self.v_password = b''
self.v_pagenos = set()
self.v_maxpages = 0
self.v_outfile = None
self.v_outtype = None
self.v_imagewriter = None
self.v_rotation = 0
self.v_stripcontrol = False
self.v_layoutmode = 'normal'
self.v_encoding = 'utf-8'
self.v_pageno = 1
self.v_scale = 1
self.v_caching = True
self.v_showpageno = True
self.v_laparams = LAParams()
self.v_filename = None
|
初始化类
|
conversion_pdf.py
|
__init__
|
student-assistant/conversion_pdf
| 0
|
python
|
def __init__(self):
self.v_debug = 0
self.v_password = b
self.v_pagenos = set()
self.v_maxpages = 0
self.v_outfile = None
self.v_outtype = None
self.v_imagewriter = None
self.v_rotation = 0
self.v_stripcontrol = False
self.v_layoutmode = 'normal'
self.v_encoding = 'utf-8'
self.v_pageno = 1
self.v_scale = 1
self.v_caching = True
self.v_showpageno = True
self.v_laparams = LAParams()
self.v_filename = None
|
def __init__(self):
self.v_debug = 0
self.v_password = b
self.v_pagenos = set()
self.v_maxpages = 0
self.v_outfile = None
self.v_outtype = None
self.v_imagewriter = None
self.v_rotation = 0
self.v_stripcontrol = False
self.v_layoutmode = 'normal'
self.v_encoding = 'utf-8'
self.v_pageno = 1
self.v_scale = 1
self.v_caching = True
self.v_showpageno = True
self.v_laparams = LAParams()
self.v_filename = None<|docstring|>初始化类<|endoftext|>
|
828b8cc358b0199e1acb129aa08f97445a0d8e99e72b05c72ae9798ce14d8c37
|
def checkKeywords(_name, keywords, **kw):
'test is all kyes in **kw are in list keywords'
for key in list(kw.keys()):
if (key not in keywords):
print(('WARNING: Keyword %s not recognized for %s' % (key, _name)))
|
test is all kyes in **kw are in list keywords
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
checkKeywords
|
e-mayo/mscreen
| 9
|
python
|
def checkKeywords(_name, keywords, **kw):
for key in list(kw.keys()):
if (key not in keywords):
print(('WARNING: Keyword %s not recognized for %s' % (key, _name)))
|
def checkKeywords(_name, keywords, **kw):
for key in list(kw.keys()):
if (key not in keywords):
print(('WARNING: Keyword %s not recognized for %s' % (key, _name)))<|docstring|>test is all kyes in **kw are in list keywords<|endoftext|>
|
d8ca2836d8cec2ee27e6220a58a8a760f7b2dd4cb08107a01247487305fdbb06
|
def __init__(self, chg=1.0, con=1.0, rad=1.0):
'Constructor for class Ion'
self.charge = chg
self.concentration = con
self.radius = rad
|
Constructor for class Ion
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
__init__
|
e-mayo/mscreen
| 9
|
python
|
def __init__(self, chg=1.0, con=1.0, rad=1.0):
self.charge = chg
self.concentration = con
self.radius = rad
|
def __init__(self, chg=1.0, con=1.0, rad=1.0):
self.charge = chg
self.concentration = con
self.radius = rad<|docstring|>Constructor for class Ion<|endoftext|>
|
dab34f9085c0bd7787f7e75793aafcb620e9c25138881a5618f9a80974b7469e
|
def toString(self):
'Converts to string representation'
return ('%.3f, %.3f, %.3f' % (self.charge, self.concentration, self.radius))
|
Converts to string representation
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
toString
|
e-mayo/mscreen
| 9
|
python
|
def toString(self):
return ('%.3f, %.3f, %.3f' % (self.charge, self.concentration, self.radius))
|
def toString(self):
return ('%.3f, %.3f, %.3f' % (self.charge, self.concentration, self.radius))<|docstring|>Converts to string representation<|endoftext|>
|
ad93dea7b9796cd31fcdbcb42a2ed2efc1f650aa09070a2a23e1824d4bc1d46b
|
def defaultGridSizes(self, mol1, mol2=None):
'set default fine and corase grid sizes and centers'
coords = mol1.findType(Atom).coords
if (self.calculationType == 'Binding energy'):
assert (mol2 is not None)
coords += mol2.findType(Atom).coords
center = ((numpy.maximum.reduce(coords) + numpy.minimum.reduce(coords)) * 0.5)
center = center.tolist()
self.fineCenterX = self.coarseCenterX = round(center[0], 4)
self.fineCenterY = self.coarseCenterY = round(center[1], 4)
self.fineCenterZ = self.coarseCenterZ = round(center[2], 4)
length = (numpy.maximum.reduce(coords) - numpy.minimum.reduce(coords))
self.coarseLengthX = ((self.CFAC * length.tolist()[0]) + 10.0)
self.coarseLengthY = ((self.CFAC * length.tolist()[1]) + 10.0)
self.coarseLengthZ = ((self.CFAC * length.tolist()[2]) + 10.0)
self.fineLengthX = (length.tolist()[0] + 10.0)
self.fineLengthY = (length.tolist()[1] + 10.0)
self.fineLengthZ = (length.tolist()[2] + 10.0)
|
set default fine and corase grid sizes and centers
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
defaultGridSizes
|
e-mayo/mscreen
| 9
|
python
|
def defaultGridSizes(self, mol1, mol2=None):
coords = mol1.findType(Atom).coords
if (self.calculationType == 'Binding energy'):
assert (mol2 is not None)
coords += mol2.findType(Atom).coords
center = ((numpy.maximum.reduce(coords) + numpy.minimum.reduce(coords)) * 0.5)
center = center.tolist()
self.fineCenterX = self.coarseCenterX = round(center[0], 4)
self.fineCenterY = self.coarseCenterY = round(center[1], 4)
self.fineCenterZ = self.coarseCenterZ = round(center[2], 4)
length = (numpy.maximum.reduce(coords) - numpy.minimum.reduce(coords))
self.coarseLengthX = ((self.CFAC * length.tolist()[0]) + 10.0)
self.coarseLengthY = ((self.CFAC * length.tolist()[1]) + 10.0)
self.coarseLengthZ = ((self.CFAC * length.tolist()[2]) + 10.0)
self.fineLengthX = (length.tolist()[0] + 10.0)
self.fineLengthY = (length.tolist()[1] + 10.0)
self.fineLengthZ = (length.tolist()[2] + 10.0)
|
def defaultGridSizes(self, mol1, mol2=None):
coords = mol1.findType(Atom).coords
if (self.calculationType == 'Binding energy'):
assert (mol2 is not None)
coords += mol2.findType(Atom).coords
center = ((numpy.maximum.reduce(coords) + numpy.minimum.reduce(coords)) * 0.5)
center = center.tolist()
self.fineCenterX = self.coarseCenterX = round(center[0], 4)
self.fineCenterY = self.coarseCenterY = round(center[1], 4)
self.fineCenterZ = self.coarseCenterZ = round(center[2], 4)
length = (numpy.maximum.reduce(coords) - numpy.minimum.reduce(coords))
self.coarseLengthX = ((self.CFAC * length.tolist()[0]) + 10.0)
self.coarseLengthY = ((self.CFAC * length.tolist()[1]) + 10.0)
self.coarseLengthZ = ((self.CFAC * length.tolist()[2]) + 10.0)
self.fineLengthX = (length.tolist()[0] + 10.0)
self.fineLengthY = (length.tolist()[1] + 10.0)
self.fineLengthZ = (length.tolist()[2] + 10.0)<|docstring|>set default fine and corase grid sizes and centers<|endoftext|>
|
403566c35be1eb9a0850555a2c1d04c8c523f2eab5ea038d9234d280a46f9d85
|
def __init__(self, name='Default', **kw):
    """Constructor for class APBSParams"""
    from mglutil.util.packageFilePath import getBinary, findFilePath, which
    # Locate the apbs executable once per instance; fall back to the
    # user's PATH when no bundled binary is available.
    if (not hasattr(self, 'APBS_Path')):
        try:
            self.APBS_Path = getBinary('apbs', 'binaries')
        except ImportError:
            self.APBS_Path = None
        if (not self.APBS_Path):
            self.APBS_Path = which('apbs')
    self.pdb2pqr_Path = findFilePath('pdb2pqr.py', 'MolKit.pdb2pqr')
    self.name = name
    # Default value for every APBS parameter.  The dict literal is
    # rebuilt on each call, so the 'ions' list is fresh per instance.
    # Keyword overrides are applied on top of these by Set() below.
    defaults = {
        'pdb2pqr_ForceField': 'amber',
        'calculationType': 'Electrostatic potential',
        'pbeType': 'Linearized',
        'boundaryConditions': 'Single Debye-Huckel',
        'chargeDiscretization': 'Cubic B-spline',
        'surfaceCalculation': 'Cubic B-spline',
        'sdens': 10.0,
        'splineWindow': 0.3,
        'molecule1Path': '',
        'molecule2Path': '',
        'complexPath': '',
        'energyOutput': 'Total',
        'forceOutput': '',
        'projectFolder': 'apbs-project',
        'chargeDistributionFile': '',
        'potentialFile': 'OpenDX',
        'solventAccessibilityFile': '',
        'splineBasedAccessibilityFile': '',
        'VDWAccessibilityFile': '',
        'ionAccessibilityFile': '',
        'laplacianOfPotentialFile': '',
        'energyDensityFile': '',
        'ionNumberFile': '',
        'ionChargeDensityFile': '',
        'xShiftedDielectricFile': '',
        'yShiftedDielectricFile': '',
        'zShiftedDielectricFile': '',
        'kappaFunctionFile': '',
        'gridPointsX': 65,
        'gridPointsY': 65,
        'gridPointsZ': 65,
        'coarseLengthX': 40,
        'coarseLengthY': 50,
        'coarseLengthZ': 60,
        'coarseCenterX': 0,
        'coarseCenterY': 0,
        'coarseCenterZ': 0,
        'fineLengthX': 20,
        'fineLengthY': 35,
        'fineLengthZ': 30,
        'fineCenterX': 0,
        'fineCenterY': 0,
        'fineCenterZ': 0,
        'proteinDielectric': 2.0,
        'solventDielectric': 78.54,
        'solventRadius': 1.4,
        'systemTemperature': 298.15,
        'saltConcentration': 0.01,
        'ions': [],
    }
    for attr, value in defaults.items():
        setattr(self, attr, value)
    self.Set(**kw)
|
Constructor for class APBSParams
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
__init__
|
e-mayo/mscreen
| 9
|
python
|
def __init__(self, name='Default', **kw):
from mglutil.util.packageFilePath import getBinary, findFilePath, which
if (not hasattr(self, 'APBS_Path')):
try:
self.APBS_Path = getBinary('apbs', 'binaries')
except ImportError:
self.APBS_Path = None
if (not self.APBS_Path):
self.APBS_Path = which('apbs')
self.pdb2pqr_Path = findFilePath('pdb2pqr.py', 'MolKit.pdb2pqr')
self.name = name
self.pdb2pqr_ForceField = 'amber'
self.calculationType = 'Electrostatic potential'
self.pbeType = 'Linearized'
self.boundaryConditions = 'Single Debye-Huckel'
self.chargeDiscretization = 'Cubic B-spline'
self.surfaceCalculation = 'Cubic B-spline'
self.sdens = 10.0
self.splineWindow = 0.3
    self.molecule1Path = ''
    self.molecule2Path = ''
    self.complexPath = ''
    self.energyOutput = 'Total'
    self.forceOutput = ''
    self.projectFolder = 'apbs-project'
    self.chargeDistributionFile = ''
    self.potentialFile = 'OpenDX'
    self.solventAccessibilityFile = ''
    self.splineBasedAccessibilityFile = ''
    self.VDWAccessibilityFile = ''
    self.ionAccessibilityFile = ''
    self.laplacianOfPotentialFile = ''
    self.energyDensityFile = ''
    self.ionNumberFile = ''
    self.ionChargeDensityFile = ''
    self.xShiftedDielectricFile = ''
    self.yShiftedDielectricFile = ''
    self.zShiftedDielectricFile = ''
    self.kappaFunctionFile = ''
self.gridPointsX = 65
self.gridPointsY = 65
self.gridPointsZ = 65
self.coarseLengthX = 40
self.coarseLengthY = 50
self.coarseLengthZ = 60
self.coarseCenterX = 0
self.coarseCenterY = 0
self.coarseCenterZ = 0
self.fineLengthX = 20
self.fineLengthY = 35
self.fineLengthZ = 30
self.fineCenterX = 0
self.fineCenterY = 0
self.fineCenterZ = 0
self.proteinDielectric = 2.0
self.solventDielectric = 78.54
self.solventRadius = 1.4
self.systemTemperature = 298.15
self.saltConcentration = 0.01
self.ions = []
self.Set(*(), **kw)
|
def __init__(self, name='Default', **kw):
from mglutil.util.packageFilePath import getBinary, findFilePath, which
if (not hasattr(self, 'APBS_Path')):
try:
self.APBS_Path = getBinary('apbs', 'binaries')
except ImportError:
self.APBS_Path = None
if (not self.APBS_Path):
self.APBS_Path = which('apbs')
self.pdb2pqr_Path = findFilePath('pdb2pqr.py', 'MolKit.pdb2pqr')
self.name = name
self.pdb2pqr_ForceField = 'amber'
self.calculationType = 'Electrostatic potential'
self.pbeType = 'Linearized'
self.boundaryConditions = 'Single Debye-Huckel'
self.chargeDiscretization = 'Cubic B-spline'
self.surfaceCalculation = 'Cubic B-spline'
self.sdens = 10.0
self.splineWindow = 0.3
    self.molecule1Path = ''
    self.molecule2Path = ''
    self.complexPath = ''
    self.energyOutput = 'Total'
    self.forceOutput = ''
    self.projectFolder = 'apbs-project'
    self.chargeDistributionFile = ''
    self.potentialFile = 'OpenDX'
    self.solventAccessibilityFile = ''
    self.splineBasedAccessibilityFile = ''
    self.VDWAccessibilityFile = ''
    self.ionAccessibilityFile = ''
    self.laplacianOfPotentialFile = ''
    self.energyDensityFile = ''
    self.ionNumberFile = ''
    self.ionChargeDensityFile = ''
    self.xShiftedDielectricFile = ''
    self.yShiftedDielectricFile = ''
    self.zShiftedDielectricFile = ''
    self.kappaFunctionFile = ''
self.gridPointsX = 65
self.gridPointsY = 65
self.gridPointsZ = 65
self.coarseLengthX = 40
self.coarseLengthY = 50
self.coarseLengthZ = 60
self.coarseCenterX = 0
self.coarseCenterY = 0
self.coarseCenterZ = 0
self.fineLengthX = 20
self.fineLengthY = 35
self.fineLengthZ = 30
self.fineCenterX = 0
self.fineCenterY = 0
self.fineCenterZ = 0
self.proteinDielectric = 2.0
self.solventDielectric = 78.54
self.solventRadius = 1.4
self.systemTemperature = 298.15
self.saltConcentration = 0.01
self.ions = []
self.Set(*(), **kw)<|docstring|>Constructor for class APBSParams<|endoftext|>
|
463070a2f409b9436da727550bfeff096c3c0a8fc85c153387d8de8727031654
|
def Set(self, check=1, **kw):
    """Set APBSParams member variable(s).

    Keyword values that are None (or empty strings, for string-valued
    parameters) are ignored, matching the historical behaviour.  Grid
    *center* keywords are applied whenever they are not None, so 0.0
    is a settable coordinate (the old ``if val:`` test silently
    dropped explicit zeros).

    check -- when true, validate keyword names against self.keywords
             via checkKeywords().
    """
    if check:
        checkKeywords(*(('APBSParam object' + self.name), self.keywords), **kw)
    val = kw.get('APBS_Path', None)
    if val:
        # only accept an executable path
        if os.access(val, os.X_OK):
            self.APBS_Path = val
    val = kw.get('pdb2pqr_Path', None)
    if val:
        if os.path.exists(val):
            self.pdb2pqr_Path = val
    val = kw.get('name', None)
    if val:
        self.name = val
    val = kw.get('pdb2pqr_ForceField', None)
    if val:
        assert (val in ['amber', 'charmm', 'parse'])
        self.pdb2pqr_ForceField = val
    # Plain string parameters copied through without validation.
    # Molecule paths are set before calculationType, which checks them.
    for attr in ('projectFolder', 'molecule1Path', 'molecule2Path',
                 'complexPath', 'chargeDiscretization',
                 'surfaceCalculation'):
        val = kw.get(attr, None)
        if val:
            setattr(self, attr, val)
    val = kw.get('calculationType', None)
    if val:
        assert (val in self.CALCULATIONTYPES)
        if (val in ('Solvation energy', 'Electrostatic potential')):
            assert (self.molecule1Path is not None)
            self.calculationType = val
        elif (val == 'Binding energy'):
            # binding energy needs both molecules and the complex
            assert (self.molecule1Path is not None)
            assert (self.molecule2Path is not None)
            assert (self.complexPath is not None)
            self.calculationType = val
    val = kw.get('pbeType', None)
    if val:
        assert (val in self.PBETYPES)
        self.pbeType = val
    val = kw.get('boundaryConditions', None)
    if val:
        assert (val in self.BOUNDARYTYPES)
        self.boundaryConditions = val
    val = kw.get('sdens', None)
    if val:
        assert ((val != '') and isinstance(val, (float, int)) and (val > 0.0))
        # BUGFIX: this branch used to clobber self.splineWindow
        self.sdens = val
    val = kw.get('splineWindow', None)
    if val:
        assert ((val != '') and isinstance(val, (float, int)) and (val > 0.0))
        self.splineWindow = val
    val = kw.get('energyOutput', None)
    if val:
        assert (val in self.ENERGYOUTPUTTYPES)
        self.energyOutput = val
    val = kw.get('forceOutput', None)
    if val:
        assert (val in self.FORCEOUTPUTTYPES)
        self.forceOutput = val
    # All output-map keywords share the same FILETYPES validation
    # (the old code repeated this block 14 times, twice for
    # chargeDistributionFile).
    for attr in ('chargeDistributionFile', 'potentialFile',
                 'solventAccessibilityFile', 'splineBasedAccessibilityFile',
                 'VDWAccessibilityFile', 'ionAccessibilityFile',
                 'laplacianOfPotentialFile', 'energyDensityFile',
                 'ionNumberFile', 'ionChargeDensityFile',
                 'xShiftedDielectricFile', 'yShiftedDielectricFile',
                 'zShiftedDielectricFile', 'kappaFunctionFile'):
        val = kw.get(attr, None)
        if val:
            assert (val in self.FILETYPES)
            setattr(self, attr, val)
    for attr in ('gridPointsX', 'gridPointsY', 'gridPointsZ'):
        val = kw.get(attr, None)
        if val:
            assert (val in self.GRID_VALUES)
            setattr(self, attr, val)
    # Strictly positive numeric parameters.
    for attr in ('coarseLengthX', 'coarseLengthY', 'coarseLengthZ',
                 'fineLengthX', 'fineLengthY', 'fineLengthZ',
                 'solventRadius', 'systemTemperature'):
        val = kw.get(attr, None)
        if val:
            assert (isinstance(val, (float, int)) and (val > 0.0))
            setattr(self, attr, val)
    # Grid centers: any real value, including 0, is legal.
    for attr in ('coarseCenterX', 'coarseCenterY', 'coarseCenterZ',
                 'fineCenterX', 'fineCenterY', 'fineCenterZ'):
        val = kw.get(attr, None)
        if (val is not None):  # BUGFIX: allow explicit 0 coordinates
            assert isinstance(val, (float, int))
            setattr(self, attr, val)
    val = kw.get('proteinDielectric', None)
    if val:
        assert isinstance(val, float)
        self.proteinDielectric = val
    val = kw.get('solventDielectric', None)
    if val:
        assert isinstance(val, (float, int))
        self.solventDielectric = val
    for ion in kw.get('ions', []):
        # duck-typed check kept from the original implementation
        assert (ion.__doc__ == Ion.__doc__)
        self.ions.append(ion)
|
Sets APBSParams member variable(s)
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
Set
|
e-mayo/mscreen
| 9
|
python
|
def Set(self, check=1, **kw):
if check:
checkKeywords(*(('APBSParam object' + self.name), self.keywords), **kw)
val = kw.get('APBS_Path', None)
if val:
if os.access(val, os.X_OK):
self.APBS_Path = val
val = kw.get('pdb2pqr_Path', None)
if val:
if os.path.exists(val):
self.pdb2pqr_Path = val
val = kw.get('name', None)
if val:
self.name = val
val = kw.get('pdb2pqr_ForceField', None)
if val:
assert (val in ['amber', 'charmm', 'parse'])
self.pdb2pqr_ForceField = val
val = kw.get('projectFolder', None)
if val:
self.projectFolder = val
val = kw.get('molecule1Path', None)
if val:
self.molecule1Path = val
val = kw.get('molecule2Path', None)
if val:
self.molecule2Path = val
val = kw.get('complexPath', None)
if val:
self.complexPath = val
val = kw.get('calculationType', None)
if val:
assert (val in self.CALCULATIONTYPES)
if ((val == 'Solvation energy') or (val == 'Electrostatic potential')):
assert (self.molecule1Path is not None)
self.calculationType = val
elif (val == 'Binding energy'):
assert (self.molecule1Path is not None)
assert (self.molecule2Path is not None)
assert (self.complexPath is not None)
self.calculationType = val
val = kw.get('pbeType', None)
if val:
assert (val in self.PBETYPES)
self.pbeType = val
val = kw.get('boundaryConditions', None)
if val:
assert (val in self.BOUNDARYTYPES)
self.boundaryConditions = val
val = kw.get('chargeDiscretization', None)
if val:
self.chargeDiscretization = val
val = kw.get('surfaceCalculation', None)
if val:
self.surfaceCalculation = val
val = kw.get('sdens', None)
if val:
        assert ((val != '') and (isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.splineWindow = val
val = kw.get('splineWindow', None)
if val:
        assert ((val != '') and (isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.splineWindow = val
val = kw.get('energyOutput', None)
if val:
assert (val in self.ENERGYOUTPUTTYPES)
self.energyOutput = val
val = kw.get('forceOutput', None)
if val:
assert (val in self.FORCEOUTPUTTYPES)
self.forceOutput = val
val = kw.get('chargeDistributionFile', None)
if val:
assert (val in self.FILETYPES)
self.chargeDistributionFile = val
val = kw.get('chargeDistributionFile', None)
if val:
assert (val in self.FILETYPES)
self.chargeDistributionFile = val
val = kw.get('potentialFile', None)
if val:
assert (val in self.FILETYPES)
self.potentialFile = val
val = kw.get('solventAccessibilityFile', None)
if val:
assert (val in self.FILETYPES)
self.solventAccessibilityFile = val
val = kw.get('splineBasedAccessibilityFile', None)
if val:
assert (val in self.FILETYPES)
self.splineBasedAccessibilityFile = val
val = kw.get('VDWAccessibilityFile', None)
if val:
assert (val in self.FILETYPES)
self.VDWAccessibilityFile = val
val = kw.get('ionAccessibilityFile', None)
if val:
assert (val in self.FILETYPES)
self.ionAccessibilityFile = val
val = kw.get('laplacianOfPotentialFile', None)
if val:
assert (val in self.FILETYPES)
self.laplacianOfPotentialFile = val
val = kw.get('energyDensityFile', None)
if val:
assert (val in self.FILETYPES)
self.energyDensityFile = val
val = kw.get('ionNumberFile', None)
if val:
assert (val in self.FILETYPES)
self.ionNumberFile = val
val = kw.get('ionChargeDensityFile', None)
if val:
assert (val in self.FILETYPES)
self.ionChargeDensityFile = val
val = kw.get('xShiftedDielectricFile', None)
if val:
assert (val in self.FILETYPES)
self.xShiftedDielectricFile = val
val = kw.get('yShiftedDielectricFile', None)
if val:
assert (val in self.FILETYPES)
self.yShiftedDielectricFile = val
val = kw.get('zShiftedDielectricFile', None)
if val:
assert (val in self.FILETYPES)
self.zShiftedDielectricFile = val
val = kw.get('kappaFunctionFile', None)
if val:
assert (val in self.FILETYPES)
self.kappaFunctionFile = val
val = kw.get('gridPointsX', None)
if val:
assert (val in self.GRID_VALUES)
self.gridPointsX = val
val = kw.get('gridPointsY', None)
if val:
assert (val in self.GRID_VALUES)
self.gridPointsY = val
val = kw.get('gridPointsZ', None)
if val:
assert (val in self.GRID_VALUES)
self.gridPointsZ = val
val = kw.get('coarseLengthX', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.coarseLengthX = val
val = kw.get('coarseLengthY', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.coarseLengthY = val
val = kw.get('coarseLengthZ', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.coarseLengthZ = val
val = kw.get('coarseCenterX', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.coarseCenterX = val
val = kw.get('coarseCenterY', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.coarseCenterY = val
val = kw.get('coarseCenterZ', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.coarseCenterZ = val
val = kw.get('coarseCenterZ', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.coarseCenterZ = val
val = kw.get('fineLengthX', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.fineLengthX = val
val = kw.get('fineLengthY', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.fineLengthY = val
val = kw.get('fineLengthZ', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.fineLengthZ = val
val = kw.get('fineCenterX', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.fineCenterX = val
val = kw.get('fineCenterY', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.fineCenterY = val
val = kw.get('fineCenterZ', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.fineCenterZ = val
val = kw.get('proteinDielectric', None)
if val:
assert isinstance(val, float)
self.proteinDielectric = val
val = kw.get('solventDielectric', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.solventDielectric = val
val = kw.get('solventRadius', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.solventRadius = val
val = kw.get('systemTemperature', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.systemTemperature = val
val = kw.get('ions', [])
for ion in val:
assert (ion.__doc__ == Ion.__doc__)
self.ions.append(ion)
|
def Set(self, check=1, **kw):
if check:
checkKeywords(*(('APBSParam object' + self.name), self.keywords), **kw)
val = kw.get('APBS_Path', None)
if val:
if os.access(val, os.X_OK):
self.APBS_Path = val
val = kw.get('pdb2pqr_Path', None)
if val:
if os.path.exists(val):
self.pdb2pqr_Path = val
val = kw.get('name', None)
if val:
self.name = val
val = kw.get('pdb2pqr_ForceField', None)
if val:
assert (val in ['amber', 'charmm', 'parse'])
self.pdb2pqr_ForceField = val
val = kw.get('projectFolder', None)
if val:
self.projectFolder = val
val = kw.get('molecule1Path', None)
if val:
self.molecule1Path = val
val = kw.get('molecule2Path', None)
if val:
self.molecule2Path = val
val = kw.get('complexPath', None)
if val:
self.complexPath = val
val = kw.get('calculationType', None)
if val:
assert (val in self.CALCULATIONTYPES)
if ((val == 'Solvation energy') or (val == 'Electrostatic potential')):
assert (self.molecule1Path is not None)
self.calculationType = val
elif (val == 'Binding energy'):
assert (self.molecule1Path is not None)
assert (self.molecule2Path is not None)
assert (self.complexPath is not None)
self.calculationType = val
val = kw.get('pbeType', None)
if val:
assert (val in self.PBETYPES)
self.pbeType = val
val = kw.get('boundaryConditions', None)
if val:
assert (val in self.BOUNDARYTYPES)
self.boundaryConditions = val
val = kw.get('chargeDiscretization', None)
if val:
self.chargeDiscretization = val
val = kw.get('surfaceCalculation', None)
if val:
self.surfaceCalculation = val
val = kw.get('sdens', None)
if val:
        assert ((val != '') and (isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.splineWindow = val
val = kw.get('splineWindow', None)
if val:
        assert ((val != '') and (isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.splineWindow = val
val = kw.get('energyOutput', None)
if val:
assert (val in self.ENERGYOUTPUTTYPES)
self.energyOutput = val
val = kw.get('forceOutput', None)
if val:
assert (val in self.FORCEOUTPUTTYPES)
self.forceOutput = val
val = kw.get('chargeDistributionFile', None)
if val:
assert (val in self.FILETYPES)
self.chargeDistributionFile = val
val = kw.get('chargeDistributionFile', None)
if val:
assert (val in self.FILETYPES)
self.chargeDistributionFile = val
val = kw.get('potentialFile', None)
if val:
assert (val in self.FILETYPES)
self.potentialFile = val
val = kw.get('solventAccessibilityFile', None)
if val:
assert (val in self.FILETYPES)
self.solventAccessibilityFile = val
val = kw.get('splineBasedAccessibilityFile', None)
if val:
assert (val in self.FILETYPES)
self.splineBasedAccessibilityFile = val
val = kw.get('VDWAccessibilityFile', None)
if val:
assert (val in self.FILETYPES)
self.VDWAccessibilityFile = val
val = kw.get('ionAccessibilityFile', None)
if val:
assert (val in self.FILETYPES)
self.ionAccessibilityFile = val
val = kw.get('laplacianOfPotentialFile', None)
if val:
assert (val in self.FILETYPES)
self.laplacianOfPotentialFile = val
val = kw.get('energyDensityFile', None)
if val:
assert (val in self.FILETYPES)
self.energyDensityFile = val
val = kw.get('ionNumberFile', None)
if val:
assert (val in self.FILETYPES)
self.ionNumberFile = val
val = kw.get('ionChargeDensityFile', None)
if val:
assert (val in self.FILETYPES)
self.ionChargeDensityFile = val
val = kw.get('xShiftedDielectricFile', None)
if val:
assert (val in self.FILETYPES)
self.xShiftedDielectricFile = val
val = kw.get('yShiftedDielectricFile', None)
if val:
assert (val in self.FILETYPES)
self.yShiftedDielectricFile = val
val = kw.get('zShiftedDielectricFile', None)
if val:
assert (val in self.FILETYPES)
self.zShiftedDielectricFile = val
val = kw.get('kappaFunctionFile', None)
if val:
assert (val in self.FILETYPES)
self.kappaFunctionFile = val
val = kw.get('gridPointsX', None)
if val:
assert (val in self.GRID_VALUES)
self.gridPointsX = val
val = kw.get('gridPointsY', None)
if val:
assert (val in self.GRID_VALUES)
self.gridPointsY = val
val = kw.get('gridPointsZ', None)
if val:
assert (val in self.GRID_VALUES)
self.gridPointsZ = val
val = kw.get('coarseLengthX', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.coarseLengthX = val
val = kw.get('coarseLengthY', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.coarseLengthY = val
val = kw.get('coarseLengthZ', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.coarseLengthZ = val
val = kw.get('coarseCenterX', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.coarseCenterX = val
val = kw.get('coarseCenterY', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.coarseCenterY = val
val = kw.get('coarseCenterZ', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.coarseCenterZ = val
val = kw.get('coarseCenterZ', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.coarseCenterZ = val
val = kw.get('fineLengthX', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.fineLengthX = val
val = kw.get('fineLengthY', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.fineLengthY = val
val = kw.get('fineLengthZ', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.fineLengthZ = val
val = kw.get('fineCenterX', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.fineCenterX = val
val = kw.get('fineCenterY', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.fineCenterY = val
val = kw.get('fineCenterZ', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.fineCenterZ = val
val = kw.get('proteinDielectric', None)
if val:
assert isinstance(val, float)
self.proteinDielectric = val
val = kw.get('solventDielectric', None)
if val:
assert (isinstance(val, float) or isinstance(val, int))
self.solventDielectric = val
val = kw.get('solventRadius', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.solventRadius = val
val = kw.get('systemTemperature', None)
if val:
assert ((isinstance(val, float) or isinstance(val, int)) and (val > 0.0))
self.systemTemperature = val
val = kw.get('ions', [])
for ion in val:
assert (ion.__doc__ == Ion.__doc__)
self.ions.append(ion)<|docstring|>Sets APBSParams member variable(s)<|endoftext|>
|
d87ea11910088882432a756e0b22f0b27d3f1ba628c1227cddaa403fe559c976
|
def apbsWriteCalculationParams(self, fp, molname):
    """None <--- apbsWriteCalculationParams(fp)

    Writes the APBS ELEC calculation parameters (pbe type, boundary
    conditions, charge discretization, surface method, energy/force
    output and all requested 'write' output maps) into the open file
    object fp.  molname is used as the basename of the output maps.
    """
    if (self.pbeType == 'Linearized'):
        fp.write('\tlpbe\n')
    else:
        fp.write('\tnpbe\n')
    if (self.boundaryConditions == 'Zero E'):
        fp.write('\tbcfl zero\n')
    elif (self.boundaryConditions == 'Single Debye-Huckel'):
        fp.write('\tbcfl sdh\n')
    else:
        fp.write('\tbcfl mdh\n')
    if (self.chargeDiscretization == 'Trilinear hat-function'):
        fp.write('\tchgm spl0\n')
    elif (self.chargeDiscretization == 'Cubic B-spline'):
        fp.write('\tchgm spl2\n')
    else:
        fp.write('\tchgm spl4\n')
    # mol/smol surfaces take a sphere density, spline surfaces a window
    if (self.surfaceCalculation == 'No smoothing'):
        fp.write('\tsrfm mol\n')
        fp.write(('\tsdens %.3f\n' % self.sdens))
    elif (self.surfaceCalculation == 'Harmonic Average'):
        fp.write('\tsrfm smol\n')
        fp.write(('\tsdens %.3f\n' % self.sdens))
    elif (self.surfaceCalculation == 'Cubic B-spline'):
        fp.write('\tsrfm spl2\n')
        fp.write(('\tswin %.3f\n' % self.splineWindow))
    else:
        fp.write('\tsrfm spl4\n')
        fp.write(('\tswin %.3f\n' % self.splineWindow))
    if (self.energyOutput == ''):
        fp.write('\tcalcenergy no\n')
    elif (self.energyOutput == 'Total'):
        fp.write('\tcalcenergy total\n')
    else:
        fp.write('\tcalcenergy comps\n')
    if (self.forceOutput == ''):
        fp.write('\tcalcforce no\n')
    elif (self.forceOutput == 'Total'):
        fp.write('\tcalcforce total\n')
    else:
        fp.write('\tcalcforce comps\n')
    # APBS format keyword for each supported output-file format; any
    # other value (e.g. '') means "do not write this map".
    fmt_map = {'OpenDX': 'dx', 'AVS UCD': 'avs', 'UHBD': 'uhbd'}
    # (attribute holding the chosen format, APBS 'write' keyword,
    #  output-file suffix) -- replaces 14 copy-pasted if/elif blocks.
    outputs = (
        ('chargeDistributionFile', 'charge', 'chargeDistribution'),
        ('potentialFile', 'pot', 'potential'),
        ('solventAccessibilityFile', 'smol', 'solventAccessibility'),
        ('splineBasedAccessibilityFile', 'sspl', 'splineBasedAccessibility'),
        ('VDWAccessibilityFile', 'vdw', 'VDWAccessibility'),
        ('ionAccessibilityFile', 'ivdw', 'ionAccessibility'),
        ('laplacianOfPotentialFile', 'lap', 'laplacianOfPotential'),
        ('energyDensityFile', 'edens', 'energyDensity'),
        ('ionNumberFile', 'ndens', 'ionNumber'),
        ('ionChargeDensityFile', 'qdens', 'ionChargeDensity'),
        ('xShiftedDielectricFile', 'dielx', 'xShiftedDielectric'),
        ('yShiftedDielectricFile', 'diely', 'yShiftedDielectric'),
        ('zShiftedDielectricFile', 'dielz', 'zShiftedDielectric'),
        ('kappaFunctionFile', 'kappa', 'kappaFunction'),
    )
    for attr, keyword, suffix in outputs:
        fmt = fmt_map.get(getattr(self, attr))
        if (fmt is not None):
            fp.write(('\twrite %s %s %s.%s\n' % (keyword, fmt, molname, suffix)))
    fp.write('\n')
|
None <--- apbsWriteCalculationParams(fp)
Writes APBS Calculation Parameters into fp
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
apbsWriteCalculationParams
|
e-mayo/mscreen
| 9
|
python
|
def apbsWriteCalculationParams(self, fp, molname):
'None <--- apbsWriteCalculationParams(fp)\n\n Writes APBS Calculation Parameters into fp\n\n '
if (self.pbeType == 'Linearized'):
fp.write('\tlpbe\n')
else:
fp.write('\tnpbe\n')
if (self.boundaryConditions == 'Zero E'):
fp.write('\tbcfl zero\n')
elif (self.boundaryConditions == 'Single Debye-Huckel'):
fp.write('\tbcfl sdh\n')
else:
fp.write('\tbcfl mdh\n')
if (self.chargeDiscretization == 'Trilinear hat-function'):
fp.write('\tchgm spl0\n')
elif (self.chargeDiscretization == 'Cubic B-spline'):
fp.write('\tchgm spl2\n')
else:
fp.write('\tchgm spl4\n')
if (self.surfaceCalculation == 'No smoothing'):
fp.write('\tsrfm mol\n')
fp.write(('\tsdens %.3f\n' % self.sdens))
elif (self.surfaceCalculation == 'Harmonic Average'):
fp.write('\tsrfm smol\n')
fp.write(('\tsdens %.3f\n' % self.sdens))
elif (self.surfaceCalculation == 'Cubic B-spline'):
fp.write('\tsrfm spl2\n')
fp.write(('\tswin %.3f\n' % self.splineWindow))
else:
fp.write('\tsrfm spl4\n')
fp.write(('\tswin %.3f\n' % self.splineWindow))
if (self.energyOutput == ):
fp.write('\tcalcenergy no\n')
elif (self.energyOutput == 'Total'):
fp.write('\tcalcenergy total\n')
else:
fp.write('\tcalcenergy comps\n')
if (self.forceOutput == ):
fp.write('\tcalcforce no\n')
elif (self.forceOutput == 'Total'):
fp.write('\tcalcforce total\n')
else:
fp.write('\tcalcforce comps\n')
tempFileString = (molname + '.chargeDistribution')
if (self.chargeDistributionFile == 'OpenDX'):
fp.write(('\twrite charge dx %s\n' % tempFileString))
elif (self.chargeDistributionFile == 'AVS UCD'):
fp.write(('\twrite charge avs %s\n' % tempFileString))
elif (self.chargeDistributionFile == 'UHBD'):
fp.write(('\twrite charge uhbd %s\n' % tempFileString))
tempFileString = (molname + '.potential')
if (self.potentialFile == 'OpenDX'):
fp.write(('\twrite pot dx %s\n' % tempFileString))
elif (self.potentialFile == 'AVS UCD'):
fp.write(('\twrite pot avs %s\n' % tempFileString))
elif (self.potentialFile == 'UHBD'):
fp.write(('\twrite pot uhbd %s\n' % tempFileString))
tempFileString = (molname + '.solventAccessibility')
if (self.solventAccessibilityFile == 'OpenDX'):
fp.write(('\twrite smol dx %s\n' % tempFileString))
elif (self.solventAccessibilityFile == 'AVS UCD'):
fp.write(('\twrite smol avs %s\n' % tempFileString))
elif (self.solventAccessibilityFile == 'UHBD'):
fp.write(('\twrite smol uhbd %s\n' % tempFileString))
tempFileString = (molname + '.splineBasedAccessibility')
if (self.splineBasedAccessibilityFile == 'OpenDX'):
fp.write(('\twrite sspl dx %s\n' % tempFileString))
elif (self.splineBasedAccessibilityFile == 'AVS UCD'):
fp.write(('\twrite sspl avs %s\n' % tempFileString))
elif (self.splineBasedAccessibilityFile == 'UHBD'):
fp.write(('\twrite sspl uhbd %s\n' % tempFileString))
tempFileString = (molname + '.VDWAccessibility')
if (self.VDWAccessibilityFile == 'OpenDX'):
fp.write(('\twrite vdw dx %s\n' % tempFileString))
elif (self.VDWAccessibilityFile == 'AVS UCD'):
fp.write(('\twrite vdw avs %s\n' % tempFileString))
elif (self.VDWAccessibilityFile == 'UHBD'):
fp.write(('\twrite vdw uhbd %s\n' % tempFileString))
tempFileString = (molname + '.ionAccessibility')
if (self.ionAccessibilityFile == 'OpenDX'):
fp.write(('\twrite ivdw dx %s\n' % tempFileString))
elif (self.ionAccessibilityFile == 'AVS UCD'):
fp.write(('\twrite ivdw avs %s\n' % tempFileString))
elif (self.ionAccessibilityFile == 'UHBD'):
fp.write(('\twrite ivdw uhbd %s\n' % tempFileString))
tempFileString = (molname + '.laplacianOfPotential')
if (self.laplacianOfPotentialFile == 'OpenDX'):
fp.write(('\twrite lap dx %s\n' % tempFileString))
elif (self.laplacianOfPotentialFile == 'AVS UCD'):
fp.write(('\twrite lap avs %s\n' % tempFileString))
elif (self.laplacianOfPotentialFile == 'UHBD'):
fp.write(('\twrite lap uhbd %s\n' % tempFileString))
tempFileString = (molname + '.energyDensity')
if (self.energyDensityFile == 'OpenDX'):
fp.write(('\twrite edens dx %s\n' % tempFileString))
elif (self.energyDensityFile == 'AVS UCD'):
fp.write(('\twrite edens avs %s\n' % tempFileString))
elif (self.energyDensityFile == 'UHBD'):
fp.write(('\twrite edens uhbd %s\n' % tempFileString))
tempFileString = (molname + '.ionNumber')
if (self.ionNumberFile == 'OpenDX'):
fp.write(('\twrite ndens dx %s\n' % tempFileString))
elif (self.ionNumberFile == 'AVS UCD'):
fp.write(('\twrite ndens avs %s\n' % tempFileString))
elif (self.ionNumberFile == 'UHBD'):
fp.write(('\twrite ndens uhbd %s\n' % tempFileString))
tempFileString = (molname + '.ionChargeDensity')
if (self.ionChargeDensityFile == 'OpenDX'):
fp.write(('\twrite qdens dx %s\n' % tempFileString))
elif (self.ionChargeDensityFile == 'AVS UCD'):
fp.write(('\twrite qdens avs %s\n' % tempFileString))
elif (self.ionChargeDensityFile == 'UHBD'):
fp.write(('\twrite qdens uhbd %s\n' % tempFileString))
tempFileString = (molname + '.xShiftedDielectric')
if (self.xShiftedDielectricFile == 'OpenDX'):
fp.write(('\twrite dielx dx %s\n' % tempFileString))
elif (self.xShiftedDielectricFile == 'AVS UCD'):
fp.write(('\twrite dielx avs %s\n' % tempFileString))
elif (self.xShiftedDielectricFile == 'UHBD'):
fp.write(('\twrite dielx uhbd %s\n' % tempFileString))
tempFileString = (molname + '.yShiftedDielectric')
if (self.yShiftedDielectricFile == 'OpenDX'):
fp.write(('\twrite diely dx %s\n' % tempFileString))
elif (self.yShiftedDielectricFile == 'AVS UCD'):
fp.write(('\twrite diely avs %s\n' % tempFileString))
elif (self.yShiftedDielectricFile == 'UHBD'):
fp.write(('\twrite diely uhbd %s\n' % tempFileString))
tempFileString = (molname + '.zShiftedDielectric')
if (self.zShiftedDielectricFile == 'OpenDX'):
fp.write(('\twrite dielz dx %s\n' % tempFileString))
elif (self.zShiftedDielectricFile == 'AVS UCD'):
fp.write(('\twrite dielz avs %s\n' % tempFileString))
elif (self.zShiftedDielectricFile == 'UHBD'):
fp.write(('\twrite dielz uhbd %s\n' % tempFileString))
tempFileString = (molname + '.kappaFunction')
if (self.kappaFunctionFile == 'OpenDX'):
fp.write(('\twrite kappa dx %s\n' % tempFileString))
elif (self.kappaFunctionFile == 'AVS UCD'):
fp.write(('\twrite kappa avs %s\n' % tempFileString))
elif (self.kappaFunctionFile == 'UHBD'):
fp.write(('\twrite kappa uhbd %s\n' % tempFileString))
fp.write('\n')
|
def apbsWriteCalculationParams(self, fp, molname):
'None <--- apbsWriteCalculationParams(fp)\n\n Writes APBS Calculation Parameters into fp\n\n '
if (self.pbeType == 'Linearized'):
fp.write('\tlpbe\n')
else:
fp.write('\tnpbe\n')
if (self.boundaryConditions == 'Zero E'):
fp.write('\tbcfl zero\n')
elif (self.boundaryConditions == 'Single Debye-Huckel'):
fp.write('\tbcfl sdh\n')
else:
fp.write('\tbcfl mdh\n')
if (self.chargeDiscretization == 'Trilinear hat-function'):
fp.write('\tchgm spl0\n')
elif (self.chargeDiscretization == 'Cubic B-spline'):
fp.write('\tchgm spl2\n')
else:
fp.write('\tchgm spl4\n')
if (self.surfaceCalculation == 'No smoothing'):
fp.write('\tsrfm mol\n')
fp.write(('\tsdens %.3f\n' % self.sdens))
elif (self.surfaceCalculation == 'Harmonic Average'):
fp.write('\tsrfm smol\n')
fp.write(('\tsdens %.3f\n' % self.sdens))
elif (self.surfaceCalculation == 'Cubic B-spline'):
fp.write('\tsrfm spl2\n')
fp.write(('\tswin %.3f\n' % self.splineWindow))
else:
fp.write('\tsrfm spl4\n')
fp.write(('\tswin %.3f\n' % self.splineWindow))
if (self.energyOutput == ):
fp.write('\tcalcenergy no\n')
elif (self.energyOutput == 'Total'):
fp.write('\tcalcenergy total\n')
else:
fp.write('\tcalcenergy comps\n')
if (self.forceOutput == ):
fp.write('\tcalcforce no\n')
elif (self.forceOutput == 'Total'):
fp.write('\tcalcforce total\n')
else:
fp.write('\tcalcforce comps\n')
tempFileString = (molname + '.chargeDistribution')
if (self.chargeDistributionFile == 'OpenDX'):
fp.write(('\twrite charge dx %s\n' % tempFileString))
elif (self.chargeDistributionFile == 'AVS UCD'):
fp.write(('\twrite charge avs %s\n' % tempFileString))
elif (self.chargeDistributionFile == 'UHBD'):
fp.write(('\twrite charge uhbd %s\n' % tempFileString))
tempFileString = (molname + '.potential')
if (self.potentialFile == 'OpenDX'):
fp.write(('\twrite pot dx %s\n' % tempFileString))
elif (self.potentialFile == 'AVS UCD'):
fp.write(('\twrite pot avs %s\n' % tempFileString))
elif (self.potentialFile == 'UHBD'):
fp.write(('\twrite pot uhbd %s\n' % tempFileString))
tempFileString = (molname + '.solventAccessibility')
if (self.solventAccessibilityFile == 'OpenDX'):
fp.write(('\twrite smol dx %s\n' % tempFileString))
elif (self.solventAccessibilityFile == 'AVS UCD'):
fp.write(('\twrite smol avs %s\n' % tempFileString))
elif (self.solventAccessibilityFile == 'UHBD'):
fp.write(('\twrite smol uhbd %s\n' % tempFileString))
tempFileString = (molname + '.splineBasedAccessibility')
if (self.splineBasedAccessibilityFile == 'OpenDX'):
fp.write(('\twrite sspl dx %s\n' % tempFileString))
elif (self.splineBasedAccessibilityFile == 'AVS UCD'):
fp.write(('\twrite sspl avs %s\n' % tempFileString))
elif (self.splineBasedAccessibilityFile == 'UHBD'):
fp.write(('\twrite sspl uhbd %s\n' % tempFileString))
tempFileString = (molname + '.VDWAccessibility')
if (self.VDWAccessibilityFile == 'OpenDX'):
fp.write(('\twrite vdw dx %s\n' % tempFileString))
elif (self.VDWAccessibilityFile == 'AVS UCD'):
fp.write(('\twrite vdw avs %s\n' % tempFileString))
elif (self.VDWAccessibilityFile == 'UHBD'):
fp.write(('\twrite vdw uhbd %s\n' % tempFileString))
tempFileString = (molname + '.ionAccessibility')
if (self.ionAccessibilityFile == 'OpenDX'):
fp.write(('\twrite ivdw dx %s\n' % tempFileString))
elif (self.ionAccessibilityFile == 'AVS UCD'):
fp.write(('\twrite ivdw avs %s\n' % tempFileString))
elif (self.ionAccessibilityFile == 'UHBD'):
fp.write(('\twrite ivdw uhbd %s\n' % tempFileString))
tempFileString = (molname + '.laplacianOfPotential')
if (self.laplacianOfPotentialFile == 'OpenDX'):
fp.write(('\twrite lap dx %s\n' % tempFileString))
elif (self.laplacianOfPotentialFile == 'AVS UCD'):
fp.write(('\twrite lap avs %s\n' % tempFileString))
elif (self.laplacianOfPotentialFile == 'UHBD'):
fp.write(('\twrite lap uhbd %s\n' % tempFileString))
tempFileString = (molname + '.energyDensity')
if (self.energyDensityFile == 'OpenDX'):
fp.write(('\twrite edens dx %s\n' % tempFileString))
elif (self.energyDensityFile == 'AVS UCD'):
fp.write(('\twrite edens avs %s\n' % tempFileString))
elif (self.energyDensityFile == 'UHBD'):
fp.write(('\twrite edens uhbd %s\n' % tempFileString))
tempFileString = (molname + '.ionNumber')
if (self.ionNumberFile == 'OpenDX'):
fp.write(('\twrite ndens dx %s\n' % tempFileString))
elif (self.ionNumberFile == 'AVS UCD'):
fp.write(('\twrite ndens avs %s\n' % tempFileString))
elif (self.ionNumberFile == 'UHBD'):
fp.write(('\twrite ndens uhbd %s\n' % tempFileString))
tempFileString = (molname + '.ionChargeDensity')
if (self.ionChargeDensityFile == 'OpenDX'):
fp.write(('\twrite qdens dx %s\n' % tempFileString))
elif (self.ionChargeDensityFile == 'AVS UCD'):
fp.write(('\twrite qdens avs %s\n' % tempFileString))
elif (self.ionChargeDensityFile == 'UHBD'):
fp.write(('\twrite qdens uhbd %s\n' % tempFileString))
tempFileString = (molname + '.xShiftedDielectric')
if (self.xShiftedDielectricFile == 'OpenDX'):
fp.write(('\twrite dielx dx %s\n' % tempFileString))
elif (self.xShiftedDielectricFile == 'AVS UCD'):
fp.write(('\twrite dielx avs %s\n' % tempFileString))
elif (self.xShiftedDielectricFile == 'UHBD'):
fp.write(('\twrite dielx uhbd %s\n' % tempFileString))
tempFileString = (molname + '.yShiftedDielectric')
if (self.yShiftedDielectricFile == 'OpenDX'):
fp.write(('\twrite diely dx %s\n' % tempFileString))
elif (self.yShiftedDielectricFile == 'AVS UCD'):
fp.write(('\twrite diely avs %s\n' % tempFileString))
elif (self.yShiftedDielectricFile == 'UHBD'):
fp.write(('\twrite diely uhbd %s\n' % tempFileString))
tempFileString = (molname + '.zShiftedDielectric')
if (self.zShiftedDielectricFile == 'OpenDX'):
fp.write(('\twrite dielz dx %s\n' % tempFileString))
elif (self.zShiftedDielectricFile == 'AVS UCD'):
fp.write(('\twrite dielz avs %s\n' % tempFileString))
elif (self.zShiftedDielectricFile == 'UHBD'):
fp.write(('\twrite dielz uhbd %s\n' % tempFileString))
tempFileString = (molname + '.kappaFunction')
if (self.kappaFunctionFile == 'OpenDX'):
fp.write(('\twrite kappa dx %s\n' % tempFileString))
elif (self.kappaFunctionFile == 'AVS UCD'):
fp.write(('\twrite kappa avs %s\n' % tempFileString))
elif (self.kappaFunctionFile == 'UHBD'):
fp.write(('\twrite kappa uhbd %s\n' % tempFileString))
fp.write('\n')<|docstring|>None <--- apbsWriteCalculationParams(fp)
Writes APBS Calculation Parameters into fp<|endoftext|>
|
291e518267220305267e07170c41a3405e898f6814663cfc28bba7a32228d6ac
|
def apbsWriteGridParams(self, fp):
'None <--- apbsWriteGridParams(fp)\n\n Writes APBS Grid Parameters into fp\n\n '
fp.write(('\tdime %d %d %d\n\n' % (self.gridPointsX, self.gridPointsY, self.gridPointsZ)))
fp.write(('\tcglen %.3f %.3f %.3f\n' % (self.coarseLengthX, self.coarseLengthY, self.coarseLengthZ)))
fp.write(('\tcgcent %.3f %.3f %.3f\n' % (self.coarseCenterX, self.coarseCenterY, self.coarseCenterZ)))
fp.write(('\tfglen %.3f %.3f %.3f\n' % (self.fineLengthX, self.fineLengthY, self.fineLengthZ)))
fp.write(('\tfgcent %.3f %.3f %.3f\n' % (self.fineCenterX, self.fineCenterY, self.fineCenterZ)))
fp.write('\n')
|
None <--- apbsWriteGridParams(fp)
Writes APBS Grid Parameters into fp
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
apbsWriteGridParams
|
e-mayo/mscreen
| 9
|
python
|
def apbsWriteGridParams(self, fp):
'None <--- apbsWriteGridParams(fp)\n\n Writes APBS Grid Parameters into fp\n\n '
fp.write(('\tdime %d %d %d\n\n' % (self.gridPointsX, self.gridPointsY, self.gridPointsZ)))
fp.write(('\tcglen %.3f %.3f %.3f\n' % (self.coarseLengthX, self.coarseLengthY, self.coarseLengthZ)))
fp.write(('\tcgcent %.3f %.3f %.3f\n' % (self.coarseCenterX, self.coarseCenterY, self.coarseCenterZ)))
fp.write(('\tfglen %.3f %.3f %.3f\n' % (self.fineLengthX, self.fineLengthY, self.fineLengthZ)))
fp.write(('\tfgcent %.3f %.3f %.3f\n' % (self.fineCenterX, self.fineCenterY, self.fineCenterZ)))
fp.write('\n')
|
def apbsWriteGridParams(self, fp):
'None <--- apbsWriteGridParams(fp)\n\n Writes APBS Grid Parameters into fp\n\n '
fp.write(('\tdime %d %d %d\n\n' % (self.gridPointsX, self.gridPointsY, self.gridPointsZ)))
fp.write(('\tcglen %.3f %.3f %.3f\n' % (self.coarseLengthX, self.coarseLengthY, self.coarseLengthZ)))
fp.write(('\tcgcent %.3f %.3f %.3f\n' % (self.coarseCenterX, self.coarseCenterY, self.coarseCenterZ)))
fp.write(('\tfglen %.3f %.3f %.3f\n' % (self.fineLengthX, self.fineLengthY, self.fineLengthZ)))
fp.write(('\tfgcent %.3f %.3f %.3f\n' % (self.fineCenterX, self.fineCenterY, self.fineCenterZ)))
fp.write('\n')<|docstring|>None <--- apbsWriteGridParams(fp)
Writes APBS Grid Parameters into fp<|endoftext|>
|
8ad61d4716f0fbd3ea20ebb974f2a3f2c9d59a328976c8720f404c1752e5e74f
|
def apbsWritePhysicsParams(self, fp):
'None <--- apbsWritePhysicsParams(fp)\n\n Writes APBS Physics Parameters into fp\n\n '
fp.write(('\ttemp %.3f\n' % self.systemTemperature))
fp.write(('\tsrad %.3f\n' % self.solventRadius))
fp.write(('\tsdie %.3f\n' % self.solventDielectric))
fp.write(('\tpdie %.3f\n' % self.proteinDielectric))
for i in range(0, len(self.ions)):
fp.write(('\tion %s\n' % self.ions[i].toString()))
if self.saltConcentration:
fp.write(('\tion 1.000, %.3f, 2.000\n' % self.saltConcentration))
fp.write(('\tion -1.000, %.3f, 2.000\n' % self.saltConcentration))
fp.write('\n')
|
None <--- apbsWritePhysicsParams(fp)
Writes APBS Physics Parameters into fp
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
apbsWritePhysicsParams
|
e-mayo/mscreen
| 9
|
python
|
def apbsWritePhysicsParams(self, fp):
'None <--- apbsWritePhysicsParams(fp)\n\n Writes APBS Physics Parameters into fp\n\n '
fp.write(('\ttemp %.3f\n' % self.systemTemperature))
fp.write(('\tsrad %.3f\n' % self.solventRadius))
fp.write(('\tsdie %.3f\n' % self.solventDielectric))
fp.write(('\tpdie %.3f\n' % self.proteinDielectric))
for i in range(0, len(self.ions)):
fp.write(('\tion %s\n' % self.ions[i].toString()))
if self.saltConcentration:
fp.write(('\tion 1.000, %.3f, 2.000\n' % self.saltConcentration))
fp.write(('\tion -1.000, %.3f, 2.000\n' % self.saltConcentration))
fp.write('\n')
|
def apbsWritePhysicsParams(self, fp):
'None <--- apbsWritePhysicsParams(fp)\n\n Writes APBS Physics Parameters into fp\n\n '
fp.write(('\ttemp %.3f\n' % self.systemTemperature))
fp.write(('\tsrad %.3f\n' % self.solventRadius))
fp.write(('\tsdie %.3f\n' % self.solventDielectric))
fp.write(('\tpdie %.3f\n' % self.proteinDielectric))
for i in range(0, len(self.ions)):
fp.write(('\tion %s\n' % self.ions[i].toString()))
if self.saltConcentration:
fp.write(('\tion 1.000, %.3f, 2.000\n' % self.saltConcentration))
fp.write(('\tion -1.000, %.3f, 2.000\n' % self.saltConcentration))
fp.write('\n')<|docstring|>None <--- apbsWritePhysicsParams(fp)
Writes APBS Physics Parameters into fp<|endoftext|>
|
d5cf1accf401fb2e1024c63a18fff1c2e28d961eed1d9f9d8088043c73c12920
|
def apbsWriteSolvationEnergy(self, fp):
'None <--- apbsWriteSolvationEnergy(fp)\n\n Writes APBS Solvation Energy Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
self.apbsWriteCalculationParams(fp, (mol_name + '_Vacuum'))
self.apbsWriteGridParams(fp)
tempSolventDielectric = self.solventDielectric
self.solventDielectric = 1.0
tempIons = self.ions
tempSaltConcentration = self.saltConcentration
self.ions = []
self.saltConcentration = None
self.apbsWritePhysicsParams(fp)
self.solventDielectric = tempSolventDielectric
self.ions = tempIons
self.saltConcentration = tempSaltConcentration
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 1 - 2\n')
fp.write('END\n\n')
fp.write('QUIT\n')
|
None <--- apbsWriteSolvationEnergy(fp)
Writes APBS Solvation Energy Parameters into fp
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
apbsWriteSolvationEnergy
|
e-mayo/mscreen
| 9
|
python
|
def apbsWriteSolvationEnergy(self, fp):
'None <--- apbsWriteSolvationEnergy(fp)\n\n Writes APBS Solvation Energy Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
self.apbsWriteCalculationParams(fp, (mol_name + '_Vacuum'))
self.apbsWriteGridParams(fp)
tempSolventDielectric = self.solventDielectric
self.solventDielectric = 1.0
tempIons = self.ions
tempSaltConcentration = self.saltConcentration
self.ions = []
self.saltConcentration = None
self.apbsWritePhysicsParams(fp)
self.solventDielectric = tempSolventDielectric
self.ions = tempIons
self.saltConcentration = tempSaltConcentration
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 1 - 2\n')
fp.write('END\n\n')
fp.write('QUIT\n')
|
def apbsWriteSolvationEnergy(self, fp):
'None <--- apbsWriteSolvationEnergy(fp)\n\n Writes APBS Solvation Energy Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
self.apbsWriteCalculationParams(fp, (mol_name + '_Vacuum'))
self.apbsWriteGridParams(fp)
tempSolventDielectric = self.solventDielectric
self.solventDielectric = 1.0
tempIons = self.ions
tempSaltConcentration = self.saltConcentration
self.ions = []
self.saltConcentration = None
self.apbsWritePhysicsParams(fp)
self.solventDielectric = tempSolventDielectric
self.ions = tempIons
self.saltConcentration = tempSaltConcentration
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 1 - 2\n')
fp.write('END\n\n')
fp.write('QUIT\n')<|docstring|>None <--- apbsWriteSolvationEnergy(fp)
Writes APBS Solvation Energy Parameters into fp<|endoftext|>
|
947c4eba4922258b24f7d50f07e56c4a10eaeb934c0536b5e7435180573306ee
|
def apbsWriteBindingEnergy(self, fp):
'None <--- apbsWriteBindingEnergy(fp)\n\n Writes APBS Binding Energy Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write(('\tmol pqr %s\n' % self.molecule2Path))
fp.write(('\tmol pqr %s\n' % self.complexPath))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 2\n')
(file_name, ext) = os.path.splitext(self.molecule2Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 3\n')
(file_name, ext) = os.path.splitext(self.complexPath)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 3 - 2 - 1\n')
fp.write('END\n\n')
fp.write('QUIT\n')
|
None <--- apbsWriteBindingEnergy(fp)
Writes APBS Binding Energy Parameters into fp
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
apbsWriteBindingEnergy
|
e-mayo/mscreen
| 9
|
python
|
def apbsWriteBindingEnergy(self, fp):
'None <--- apbsWriteBindingEnergy(fp)\n\n Writes APBS Binding Energy Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write(('\tmol pqr %s\n' % self.molecule2Path))
fp.write(('\tmol pqr %s\n' % self.complexPath))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 2\n')
(file_name, ext) = os.path.splitext(self.molecule2Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 3\n')
(file_name, ext) = os.path.splitext(self.complexPath)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 3 - 2 - 1\n')
fp.write('END\n\n')
fp.write('QUIT\n')
|
def apbsWriteBindingEnergy(self, fp):
'None <--- apbsWriteBindingEnergy(fp)\n\n Writes APBS Binding Energy Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write(('\tmol pqr %s\n' % self.molecule2Path))
fp.write(('\tmol pqr %s\n' % self.complexPath))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 2\n')
(file_name, ext) = os.path.splitext(self.molecule2Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 3\n')
(file_name, ext) = os.path.splitext(self.complexPath)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 3 - 2 - 1\n')
fp.write('END\n\n')
fp.write('QUIT\n')<|docstring|>None <--- apbsWriteBindingEnergy(fp)
Writes APBS Binding Energy Parameters into fp<|endoftext|>
|
879e1b9af8626af50aac5d176e80975173c33fba8f38240b3e10e90e601436da
|
def apbsWriteElectrostaticPotential(self, fp):
'None <--- apbsWriteElectrostaticPotential(fp)\n\n Writes APBS Electrostatic Potential Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 1\n')
fp.write('END\n\n')
fp.write('QUIT\n')
|
None <--- apbsWriteElectrostaticPotential(fp)
Writes APBS Electrostatic Potential Parameters into fp
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
apbsWriteElectrostaticPotential
|
e-mayo/mscreen
| 9
|
python
|
def apbsWriteElectrostaticPotential(self, fp):
'None <--- apbsWriteElectrostaticPotential(fp)\n\n Writes APBS Electrostatic Potential Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 1\n')
fp.write('END\n\n')
fp.write('QUIT\n')
|
def apbsWriteElectrostaticPotential(self, fp):
'None <--- apbsWriteElectrostaticPotential(fp)\n\n Writes APBS Electrostatic Potential Parameters into fp\n\n '
fp.write('READ\n')
fp.write(('\tmol pqr %s\n' % self.molecule1Path))
fp.write('END\n\n')
fp.write('ELEC\n')
fp.write('\tmg-auto\n')
fp.write('\tmol 1\n')
(file_name, ext) = os.path.splitext(self.molecule1Path)
mol_name = os.path.split(file_name)[(- 1)]
self.apbsWriteCalculationParams(fp, mol_name)
self.apbsWriteGridParams(fp)
self.apbsWritePhysicsParams(fp)
fp.write('END\n\n')
fp.write('PRINT\n')
fp.write('\telecEnergy 1\n')
fp.write('END\n\n')
fp.write('QUIT\n')<|docstring|>None <--- apbsWriteElectrostaticPotential(fp)
Writes APBS Electrostatic Potential Parameters into fp<|endoftext|>
|
568da78654c937d70cda33b742360765257b4d03f12220f5986651ab5cdcd552
|
def SaveAPBSInput(self, filename):
'None <--- apbsWriteElectrostaticPotential(filename)\n\n Saves APBS Input Parameters in a file named filename \n\n '
fp = open(filename, 'wb+')
if (self.calculationType == 'Solvation energy'):
self.apbsWriteSolvationEnergy(fp)
elif (self.calculationType == 'Binding energy'):
self.apbsWriteBindingEnergy(fp)
else:
self.apbsWriteElectrostaticPotential(fp)
fp.close()
|
None <--- apbsWriteElectrostaticPotential(filename)
Saves APBS Input Parameters in a file named filename
|
mscreen/autodocktools_prepare_py3k/MolKit/APBSParameters.py
|
SaveAPBSInput
|
e-mayo/mscreen
| 9
|
python
|
def SaveAPBSInput(self, filename):
'None <--- apbsWriteElectrostaticPotential(filename)\n\n Saves APBS Input Parameters in a file named filename \n\n '
fp = open(filename, 'wb+')
if (self.calculationType == 'Solvation energy'):
self.apbsWriteSolvationEnergy(fp)
elif (self.calculationType == 'Binding energy'):
self.apbsWriteBindingEnergy(fp)
else:
self.apbsWriteElectrostaticPotential(fp)
fp.close()
|
def SaveAPBSInput(self, filename):
'None <--- apbsWriteElectrostaticPotential(filename)\n\n Saves APBS Input Parameters in a file named filename \n\n '
fp = open(filename, 'wb+')
if (self.calculationType == 'Solvation energy'):
self.apbsWriteSolvationEnergy(fp)
elif (self.calculationType == 'Binding energy'):
self.apbsWriteBindingEnergy(fp)
else:
self.apbsWriteElectrostaticPotential(fp)
fp.close()<|docstring|>None <--- apbsWriteElectrostaticPotential(filename)
Saves APBS Input Parameters in a file named filename<|endoftext|>
|
994ce7b25c5af3633723f8419914f3cf864399ad4e338568ff3b72d4f2ab7a12
|
def test_true_is_true(self):
' Tests if True is equal to True. Should always pass. '
self.assertEqual(True, True)
|
Tests if True is equal to True. Should always pass.
|
wiki/tests.py
|
test_true_is_true
|
caocmai/makewiki_v2
| 0
|
python
|
def test_true_is_true(self):
' '
self.assertEqual(True, True)
|
def test_true_is_true(self):
' '
self.assertEqual(True, True)<|docstring|>Tests if True is equal to True. Should always pass.<|endoftext|>
|
a99068d286ba67b1ce28c3ba7ecf26c1edb0164fd56f18295414fce351b16665
|
def test_page_slugify_on_save(self):
' Test the slug generated when saving a Page. '
user = User()
user.save()
page = Page(title='My Test Page', content='test', author=user)
page.save()
self.assertEqual(page.slug, 'my-test-page')
|
Test the slug generated when saving a Page.
|
wiki/tests.py
|
test_page_slugify_on_save
|
caocmai/makewiki_v2
| 0
|
python
|
def test_page_slugify_on_save(self):
' '
user = User()
user.save()
page = Page(title='My Test Page', content='test', author=user)
page.save()
self.assertEqual(page.slug, 'my-test-page')
|
def test_page_slugify_on_save(self):
' '
user = User()
user.save()
page = Page(title='My Test Page', content='test', author=user)
page.save()
self.assertEqual(page.slug, 'my-test-page')<|docstring|>Test the slug generated when saving a Page.<|endoftext|>
|
5d72aae64f68ca76e83ad024687ecade634b25109ce6705734c4149bc7cfd71c
|
def test_create_wiki_page(self):
' Test so that the form page load when visiting create '
response = self.client.get('/new_wiki/')
self.assertEqual(response.status_code, 200)
|
Test so that the form page load when visiting create
|
wiki/tests.py
|
test_create_wiki_page
|
caocmai/makewiki_v2
| 0
|
python
|
def test_create_wiki_page(self):
' '
response = self.client.get('/new_wiki/')
self.assertEqual(response.status_code, 200)
|
def test_create_wiki_page(self):
' '
response = self.client.get('/new_wiki/')
self.assertEqual(response.status_code, 200)<|docstring|>Test so that the form page load when visiting create<|endoftext|>
|
0f9ac0a024370f676926e9360733780d8c2c2de7a1d40893a0053f9523b2230a
|
@app.task(bind=True, name='osism.tasks.netbox.periodic_synchronize_ironic')
def periodic_synchronize_ironic(self):
'Synchronize the state of Ironic with Netbox'
openstack.baremetal_node_list.apply_async((), link=synchronize_device_state.s())
|
Synchronize the state of Ironic with Netbox
|
osism/tasks/netbox.py
|
periodic_synchronize_ironic
|
osism/python-osism
| 0
|
python
|
@app.task(bind=True, name='osism.tasks.netbox.periodic_synchronize_ironic')
def periodic_synchronize_ironic(self):
openstack.baremetal_node_list.apply_async((), link=synchronize_device_state.s())
|
@app.task(bind=True, name='osism.tasks.netbox.periodic_synchronize_ironic')
def periodic_synchronize_ironic(self):
openstack.baremetal_node_list.apply_async((), link=synchronize_device_state.s())<|docstring|>Synchronize the state of Ironic with Netbox<|endoftext|>
|
6067fab4b9cd6a6e8b30294f9bce814705f3aeb9f7cf878df8cf2ef85b9243cf
|
@app.task(bind=True, name='osism.tasks.netbox.periodic_synchronize_bifrost')
def periodic_synchronize_bifrost(self):
'Synchronize the state of Bifrost with Netbox'
ansible.run.apply_async(('manager', 'bifrost-command', 'baremetal node list -f json'), link=synchronize_device_state.s())
|
Synchronize the state of Bifrost with Netbox
|
osism/tasks/netbox.py
|
periodic_synchronize_bifrost
|
osism/python-osism
| 0
|
python
|
@app.task(bind=True, name='osism.tasks.netbox.periodic_synchronize_bifrost')
def periodic_synchronize_bifrost(self):
ansible.run.apply_async(('manager', 'bifrost-command', 'baremetal node list -f json'), link=synchronize_device_state.s())
|
@app.task(bind=True, name='osism.tasks.netbox.periodic_synchronize_bifrost')
def periodic_synchronize_bifrost(self):
ansible.run.apply_async(('manager', 'bifrost-command', 'baremetal node list -f json'), link=synchronize_device_state.s())<|docstring|>Synchronize the state of Bifrost with Netbox<|endoftext|>
|
4243f751f34363a2c8eac94470d959c83fb0062b8461daa64017722b76962539
|
@app.task(bind=True, name='osism.tasks.netbox.synchronize_device_state')
def synchronize_device_state(self, data):
'Synchronize the state of Bifrost or Ironic with Netbox'
if (type(data) == str):
data = json.loads(data)
if (not data):
return
for device in data:
manage_device.set_provision_state(device['Name'], device['Provisioning State'])
manage_device.set_power_state(device['Name'], device['Power State'])
|
Synchronize the state of Bifrost or Ironic with Netbox
|
osism/tasks/netbox.py
|
synchronize_device_state
|
osism/python-osism
| 0
|
python
|
@app.task(bind=True, name='osism.tasks.netbox.synchronize_device_state')
def synchronize_device_state(self, data):
if (type(data) == str):
data = json.loads(data)
if (not data):
return
for device in data:
manage_device.set_provision_state(device['Name'], device['Provisioning State'])
manage_device.set_power_state(device['Name'], device['Power State'])
|
@app.task(bind=True, name='osism.tasks.netbox.synchronize_device_state')
def synchronize_device_state(self, data):
if (type(data) == str):
data = json.loads(data)
if (not data):
return
for device in data:
manage_device.set_provision_state(device['Name'], device['Provisioning State'])
manage_device.set_power_state(device['Name'], device['Power State'])<|docstring|>Synchronize the state of Bifrost or Ironic with Netbox<|endoftext|>
|
11ba9f3d64577a36cc940bacb76849eb92c9860e71e9859205ca1fdb9e660468
|
def _fit(self, train_data, val_data):
'Fit Faster R-CNN model.'
self._best_map = 0
self.epoch = 0
self._time_elapsed = 0
if (max(self._cfg.train.start_epoch, self.epoch) >= self._cfg.train.epochs):
return {'time', self._time_elapsed}
self.net.collect_params().setattr('grad_req', 'null')
self.net.collect_train_params().setattr('grad_req', 'write')
self._init_trainer()
return self._resume_fit(train_data, val_data)
|
Fit Faster R-CNN model.
|
gluoncv/auto/estimators/faster_rcnn/faster_rcnn.py
|
_fit
|
siriusmehta/gluon-cv
| 1
|
python
|
def _fit(self, train_data, val_data):
self._best_map = 0
self.epoch = 0
self._time_elapsed = 0
if (max(self._cfg.train.start_epoch, self.epoch) >= self._cfg.train.epochs):
return {'time', self._time_elapsed}
self.net.collect_params().setattr('grad_req', 'null')
self.net.collect_train_params().setattr('grad_req', 'write')
self._init_trainer()
return self._resume_fit(train_data, val_data)
|
def _fit(self, train_data, val_data):
self._best_map = 0
self.epoch = 0
self._time_elapsed = 0
if (max(self._cfg.train.start_epoch, self.epoch) >= self._cfg.train.epochs):
return {'time', self._time_elapsed}
self.net.collect_params().setattr('grad_req', 'null')
self.net.collect_train_params().setattr('grad_req', 'write')
self._init_trainer()
return self._resume_fit(train_data, val_data)<|docstring|>Fit Faster R-CNN model.<|endoftext|>
|
5d35d75b36dce0cbfa9935948bd78a893ffbd08c75664871a0582aaf6effde7d
|
def _evaluate(self, val_data):
'Evaluate on validation dataset.'
clipper = BBoxClipToImage()
if (not isinstance(val_data, gluon.data.DataLoader)):
if hasattr(val_data, 'to_mxnet'):
val_data = val_data.to_mxnet()
val_bfn = Tuple(*[Append() for _ in range(3)])
short = (self.net.short[(- 1)] if isinstance(self.net.short, (tuple, list)) else self.net.short)
val_data = gluon.data.DataLoader(val_data.transform(FasterRCNNDefaultValTransform(short, self.net.max_size)), len(self.ctx), False, batchify_fn=val_bfn, last_batch='keep', num_workers=self._cfg.num_workers)
if (self._cfg.valid.metric == 'voc07'):
eval_metric = VOC07MApMetric(iou_thresh=self._cfg.valid.iou_thresh, class_names=self.classes)
elif (self._cfg.valid.metric == 'voc'):
eval_metric = VOCMApMetric(iou_thresh=self._cfg.valid.iou_thresh, class_names=self.classes)
else:
raise ValueError(f'Invalid metric type: {self._cfg.valid.metric}')
if (not self._cfg.disable_hybridization):
self.net.hybridize(static_alloc=self._cfg.faster_rcnn.static_alloc)
for batch in val_data:
batch = _split_and_load(batch, ctx_list=self.ctx)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for (x, y, im_scale) in zip(*batch):
(ids, scores, bboxes) = self.net(x)
det_ids.append(ids)
det_scores.append(scores)
det_bboxes.append(clipper(bboxes, x))
im_scale = im_scale.reshape((- 1)).asscalar()
det_bboxes[(- 1)] *= im_scale
gt_ids.append(y.slice_axis(axis=(- 1), begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=(- 1), begin=0, end=4))
gt_bboxes[(- 1)] *= im_scale
gt_difficults.append((y.slice_axis(axis=(- 1), begin=5, end=6) if (y.shape[(- 1)] > 5) else None))
for (det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff) in zip(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
return eval_metric.get()
|
Evaluate on validation dataset.
|
gluoncv/auto/estimators/faster_rcnn/faster_rcnn.py
|
_evaluate
|
siriusmehta/gluon-cv
| 1
|
python
|
def _evaluate(self, val_data):
clipper = BBoxClipToImage()
if (not isinstance(val_data, gluon.data.DataLoader)):
if hasattr(val_data, 'to_mxnet'):
val_data = val_data.to_mxnet()
val_bfn = Tuple(*[Append() for _ in range(3)])
short = (self.net.short[(- 1)] if isinstance(self.net.short, (tuple, list)) else self.net.short)
val_data = gluon.data.DataLoader(val_data.transform(FasterRCNNDefaultValTransform(short, self.net.max_size)), len(self.ctx), False, batchify_fn=val_bfn, last_batch='keep', num_workers=self._cfg.num_workers)
if (self._cfg.valid.metric == 'voc07'):
eval_metric = VOC07MApMetric(iou_thresh=self._cfg.valid.iou_thresh, class_names=self.classes)
elif (self._cfg.valid.metric == 'voc'):
eval_metric = VOCMApMetric(iou_thresh=self._cfg.valid.iou_thresh, class_names=self.classes)
else:
raise ValueError(f'Invalid metric type: {self._cfg.valid.metric}')
if (not self._cfg.disable_hybridization):
self.net.hybridize(static_alloc=self._cfg.faster_rcnn.static_alloc)
for batch in val_data:
batch = _split_and_load(batch, ctx_list=self.ctx)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for (x, y, im_scale) in zip(*batch):
(ids, scores, bboxes) = self.net(x)
det_ids.append(ids)
det_scores.append(scores)
det_bboxes.append(clipper(bboxes, x))
im_scale = im_scale.reshape((- 1)).asscalar()
det_bboxes[(- 1)] *= im_scale
gt_ids.append(y.slice_axis(axis=(- 1), begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=(- 1), begin=0, end=4))
gt_bboxes[(- 1)] *= im_scale
gt_difficults.append((y.slice_axis(axis=(- 1), begin=5, end=6) if (y.shape[(- 1)] > 5) else None))
for (det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff) in zip(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
return eval_metric.get()
|
def _evaluate(self, val_data):
clipper = BBoxClipToImage()
if (not isinstance(val_data, gluon.data.DataLoader)):
if hasattr(val_data, 'to_mxnet'):
val_data = val_data.to_mxnet()
val_bfn = Tuple(*[Append() for _ in range(3)])
short = (self.net.short[(- 1)] if isinstance(self.net.short, (tuple, list)) else self.net.short)
val_data = gluon.data.DataLoader(val_data.transform(FasterRCNNDefaultValTransform(short, self.net.max_size)), len(self.ctx), False, batchify_fn=val_bfn, last_batch='keep', num_workers=self._cfg.num_workers)
if (self._cfg.valid.metric == 'voc07'):
eval_metric = VOC07MApMetric(iou_thresh=self._cfg.valid.iou_thresh, class_names=self.classes)
elif (self._cfg.valid.metric == 'voc'):
eval_metric = VOCMApMetric(iou_thresh=self._cfg.valid.iou_thresh, class_names=self.classes)
else:
raise ValueError(f'Invalid metric type: {self._cfg.valid.metric}')
if (not self._cfg.disable_hybridization):
self.net.hybridize(static_alloc=self._cfg.faster_rcnn.static_alloc)
for batch in val_data:
batch = _split_and_load(batch, ctx_list=self.ctx)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for (x, y, im_scale) in zip(*batch):
(ids, scores, bboxes) = self.net(x)
det_ids.append(ids)
det_scores.append(scores)
det_bboxes.append(clipper(bboxes, x))
im_scale = im_scale.reshape((- 1)).asscalar()
det_bboxes[(- 1)] *= im_scale
gt_ids.append(y.slice_axis(axis=(- 1), begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=(- 1), begin=0, end=4))
gt_bboxes[(- 1)] *= im_scale
gt_difficults.append((y.slice_axis(axis=(- 1), begin=5, end=6) if (y.shape[(- 1)] > 5) else None))
for (det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff) in zip(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
return eval_metric.get()<|docstring|>Evaluate on validation dataset.<|endoftext|>
|
baeb6aaf94d82ae9f7ef884812d96ecfab6135978b3a631ac2fb6308dd68c1bf
|
def _predict(self, x):
'Predict an individual example.'
short_size = (self.net.short[(- 1)] if isinstance(self.net.short, (tuple, list)) else self.net.short)
if isinstance(x, str):
x = load_test(x, short=short_size, max_size=1024)[0]
elif isinstance(x, mx.nd.NDArray):
x = transform_test(x, short=short_size, max_size=1024)[0]
elif isinstance(x, pd.DataFrame):
assert ('image' in x.columns), 'Expect column `image` for input images'
def _predict_merge(x):
y = self._predict(x)
y['image'] = x
return y
return pd.concat([_predict_merge(xx) for xx in x['image']]).reset_index(drop=True)
else:
raise ValueError('Input is not supported: {}'.format(type(x)))
(height, width) = x.shape[2:4]
x = x.as_in_context(self.ctx[0])
(ids, scores, bboxes) = [xx[0].asnumpy() for xx in self.net(x)]
bboxes[(:, (0, 2))] /= width
bboxes[(:, (1, 3))] /= height
bboxes = np.clip(bboxes, 0.0, 1.0).tolist()
df = pd.DataFrame({'predict_class': [self.classes[int(id)] for id in ids], 'predict_score': scores.flatten(), 'predict_rois': [{'xmin': bbox[0], 'ymin': bbox[1], 'xmax': bbox[2], 'ymax': bbox[3]} for bbox in bboxes]})
valid_df = df[(df['predict_score'] > 0)].reset_index(drop=True)
return valid_df
|
Predict an individual example.
|
gluoncv/auto/estimators/faster_rcnn/faster_rcnn.py
|
_predict
|
siriusmehta/gluon-cv
| 1
|
python
|
def _predict(self, x):
short_size = (self.net.short[(- 1)] if isinstance(self.net.short, (tuple, list)) else self.net.short)
if isinstance(x, str):
x = load_test(x, short=short_size, max_size=1024)[0]
elif isinstance(x, mx.nd.NDArray):
x = transform_test(x, short=short_size, max_size=1024)[0]
elif isinstance(x, pd.DataFrame):
assert ('image' in x.columns), 'Expect column `image` for input images'
def _predict_merge(x):
y = self._predict(x)
y['image'] = x
return y
return pd.concat([_predict_merge(xx) for xx in x['image']]).reset_index(drop=True)
else:
raise ValueError('Input is not supported: {}'.format(type(x)))
(height, width) = x.shape[2:4]
x = x.as_in_context(self.ctx[0])
(ids, scores, bboxes) = [xx[0].asnumpy() for xx in self.net(x)]
bboxes[(:, (0, 2))] /= width
bboxes[(:, (1, 3))] /= height
bboxes = np.clip(bboxes, 0.0, 1.0).tolist()
df = pd.DataFrame({'predict_class': [self.classes[int(id)] for id in ids], 'predict_score': scores.flatten(), 'predict_rois': [{'xmin': bbox[0], 'ymin': bbox[1], 'xmax': bbox[2], 'ymax': bbox[3]} for bbox in bboxes]})
valid_df = df[(df['predict_score'] > 0)].reset_index(drop=True)
return valid_df
|
def _predict(self, x):
short_size = (self.net.short[(- 1)] if isinstance(self.net.short, (tuple, list)) else self.net.short)
if isinstance(x, str):
x = load_test(x, short=short_size, max_size=1024)[0]
elif isinstance(x, mx.nd.NDArray):
x = transform_test(x, short=short_size, max_size=1024)[0]
elif isinstance(x, pd.DataFrame):
assert ('image' in x.columns), 'Expect column `image` for input images'
def _predict_merge(x):
y = self._predict(x)
y['image'] = x
return y
return pd.concat([_predict_merge(xx) for xx in x['image']]).reset_index(drop=True)
else:
raise ValueError('Input is not supported: {}'.format(type(x)))
(height, width) = x.shape[2:4]
x = x.as_in_context(self.ctx[0])
(ids, scores, bboxes) = [xx[0].asnumpy() for xx in self.net(x)]
bboxes[(:, (0, 2))] /= width
bboxes[(:, (1, 3))] /= height
bboxes = np.clip(bboxes, 0.0, 1.0).tolist()
df = pd.DataFrame({'predict_class': [self.classes[int(id)] for id in ids], 'predict_score': scores.flatten(), 'predict_rois': [{'xmin': bbox[0], 'ymin': bbox[1], 'xmax': bbox[2], 'ymax': bbox[3]} for bbox in bboxes]})
valid_df = df[(df['predict_score'] > 0)].reset_index(drop=True)
return valid_df<|docstring|>Predict an individual example.<|endoftext|>
|
364b721858e99a1d68074f38b1a2af55ed61249f18d92196c89e770aa3a9c18d
|
@pytest.mark.parametrize('cls', CacheLineSize)
@pytest.mark.parametrize('mode', [CacheMode.WT])
def test_eviction_two_cores(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
'Test if eviction works correctly when remapping cachelines between distinct cores.'
cache_device = RamVolume(Size.from_MiB(50))
core_device1 = RamVolume(Size.from_MiB(40))
core_device2 = RamVolume(Size.from_MiB(40))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size = cache.get_stats()['conf']['size']
core1 = Core.using_device(core_device1, name='core1')
core2 = Core.using_device(core_device2, name='core2')
cache.add_core(core1)
vol1 = CoreVolume(core1, open=True)
cache.add_core(core2)
vol2 = CoreVolume(core2, open=True)
valid_io_size = Size.from_B(cache_size.B)
test_data = Data(valid_io_size)
send_io(core1, test_data)
send_io(core2, test_data)
stats1 = core1.get_stats()
stats2 = core2.get_stats()
assert (stats1['usage']['occupancy']['value'] == 0)
assert (stats2['usage']['occupancy']['value'] == valid_io_size.blocks_4k)
|
Test if eviction works correctly when remapping cachelines between distinct cores.
|
tests/functional/tests/eviction/test_eviction.py
|
test_eviction_two_cores
|
hammerg/ocf
| 0
|
python
|
@pytest.mark.parametrize('cls', CacheLineSize)
@pytest.mark.parametrize('mode', [CacheMode.WT])
def test_eviction_two_cores(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
cache_device = RamVolume(Size.from_MiB(50))
core_device1 = RamVolume(Size.from_MiB(40))
core_device2 = RamVolume(Size.from_MiB(40))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size = cache.get_stats()['conf']['size']
core1 = Core.using_device(core_device1, name='core1')
core2 = Core.using_device(core_device2, name='core2')
cache.add_core(core1)
vol1 = CoreVolume(core1, open=True)
cache.add_core(core2)
vol2 = CoreVolume(core2, open=True)
valid_io_size = Size.from_B(cache_size.B)
test_data = Data(valid_io_size)
send_io(core1, test_data)
send_io(core2, test_data)
stats1 = core1.get_stats()
stats2 = core2.get_stats()
assert (stats1['usage']['occupancy']['value'] == 0)
assert (stats2['usage']['occupancy']['value'] == valid_io_size.blocks_4k)
|
@pytest.mark.parametrize('cls', CacheLineSize)
@pytest.mark.parametrize('mode', [CacheMode.WT])
def test_eviction_two_cores(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
cache_device = RamVolume(Size.from_MiB(50))
core_device1 = RamVolume(Size.from_MiB(40))
core_device2 = RamVolume(Size.from_MiB(40))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size = cache.get_stats()['conf']['size']
core1 = Core.using_device(core_device1, name='core1')
core2 = Core.using_device(core_device2, name='core2')
cache.add_core(core1)
vol1 = CoreVolume(core1, open=True)
cache.add_core(core2)
vol2 = CoreVolume(core2, open=True)
valid_io_size = Size.from_B(cache_size.B)
test_data = Data(valid_io_size)
send_io(core1, test_data)
send_io(core2, test_data)
stats1 = core1.get_stats()
stats2 = core2.get_stats()
assert (stats1['usage']['occupancy']['value'] == 0)
assert (stats2['usage']['occupancy']['value'] == valid_io_size.blocks_4k)<|docstring|>Test if eviction works correctly when remapping cachelines between distinct cores.<|endoftext|>
|
62c22b737de434fc0e197534ba8f328e14fddc2c0eaeddde51db35d794513001
|
@pytest.mark.parametrize('cls', CacheLineSize)
@pytest.mark.parametrize('mode', [CacheMode.WT, CacheMode.WB, CacheMode.WO])
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
'Test if eviction does not occur when IO greater than cache size is submitted.'
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(200))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache_size = cache.get_stats()['conf']['size']
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
valid_io_size = Size.from_B((cache_size.B // 2))
test_data = Data(valid_io_size)
send_io(core, test_data)
stats = core.cache.get_stats()
first_block_sts = stats['block']
first_usage_sts = stats['usage']
pt_writes_first = stats['req']['wr_pt']
assert (stats['usage']['occupancy']['value'] == (valid_io_size.B / Size.from_KiB(4).B)), 'Occupancy after first IO'
prev_writes_to_core = stats['block']['core_volume_wr']['value']
io_size_bigger_than_cache = Size.from_MiB(100)
io_offset = valid_io_size
test_data = Data(io_size_bigger_than_cache)
send_io(core, test_data, io_offset)
if (mode is not CacheMode.WT):
cache.flush()
stats = core.cache.get_stats()
second_block_sts = stats['block']
second_usage_sts = stats['usage']
pt_writes_second = stats['req']['wr_pt']
assert (first_usage_sts['occupancy'] == second_usage_sts['occupancy'])
assert (pt_writes_first['value'] == 0)
assert (pt_writes_second['value'] == 1)
assert (second_block_sts['cache_volume_wr']['value'] == valid_io_size.blocks_4k)
assert (second_block_sts['core_volume_wr']['value'] == (valid_io_size.blocks_4k + io_size_bigger_than_cache.blocks_4k))
|
Test if eviction does not occur when IO greater than cache size is submitted.
|
tests/functional/tests/eviction/test_eviction.py
|
test_write_size_greater_than_cache
|
hammerg/ocf
| 0
|
python
|
@pytest.mark.parametrize('cls', CacheLineSize)
@pytest.mark.parametrize('mode', [CacheMode.WT, CacheMode.WB, CacheMode.WO])
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(200))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache_size = cache.get_stats()['conf']['size']
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
valid_io_size = Size.from_B((cache_size.B // 2))
test_data = Data(valid_io_size)
send_io(core, test_data)
stats = core.cache.get_stats()
first_block_sts = stats['block']
first_usage_sts = stats['usage']
pt_writes_first = stats['req']['wr_pt']
assert (stats['usage']['occupancy']['value'] == (valid_io_size.B / Size.from_KiB(4).B)), 'Occupancy after first IO'
prev_writes_to_core = stats['block']['core_volume_wr']['value']
io_size_bigger_than_cache = Size.from_MiB(100)
io_offset = valid_io_size
test_data = Data(io_size_bigger_than_cache)
send_io(core, test_data, io_offset)
if (mode is not CacheMode.WT):
cache.flush()
stats = core.cache.get_stats()
second_block_sts = stats['block']
second_usage_sts = stats['usage']
pt_writes_second = stats['req']['wr_pt']
assert (first_usage_sts['occupancy'] == second_usage_sts['occupancy'])
assert (pt_writes_first['value'] == 0)
assert (pt_writes_second['value'] == 1)
assert (second_block_sts['cache_volume_wr']['value'] == valid_io_size.blocks_4k)
assert (second_block_sts['core_volume_wr']['value'] == (valid_io_size.blocks_4k + io_size_bigger_than_cache.blocks_4k))
|
@pytest.mark.parametrize('cls', CacheLineSize)
@pytest.mark.parametrize('mode', [CacheMode.WT, CacheMode.WB, CacheMode.WO])
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(200))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache_size = cache.get_stats()['conf']['size']
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
valid_io_size = Size.from_B((cache_size.B // 2))
test_data = Data(valid_io_size)
send_io(core, test_data)
stats = core.cache.get_stats()
first_block_sts = stats['block']
first_usage_sts = stats['usage']
pt_writes_first = stats['req']['wr_pt']
assert (stats['usage']['occupancy']['value'] == (valid_io_size.B / Size.from_KiB(4).B)), 'Occupancy after first IO'
prev_writes_to_core = stats['block']['core_volume_wr']['value']
io_size_bigger_than_cache = Size.from_MiB(100)
io_offset = valid_io_size
test_data = Data(io_size_bigger_than_cache)
send_io(core, test_data, io_offset)
if (mode is not CacheMode.WT):
cache.flush()
stats = core.cache.get_stats()
second_block_sts = stats['block']
second_usage_sts = stats['usage']
pt_writes_second = stats['req']['wr_pt']
assert (first_usage_sts['occupancy'] == second_usage_sts['occupancy'])
assert (pt_writes_first['value'] == 0)
assert (pt_writes_second['value'] == 1)
assert (second_block_sts['cache_volume_wr']['value'] == valid_io_size.blocks_4k)
assert (second_block_sts['core_volume_wr']['value'] == (valid_io_size.blocks_4k + io_size_bigger_than_cache.blocks_4k))<|docstring|>Test if eviction does not occur when IO greater than cache size is submitted.<|endoftext|>
|
1bd1633a92a327cd5c1d8cfa7771e4cb398e5c2d5f86652a0d70cf2216081b74
|
@pytest.mark.parametrize('cls', CacheLineSize)
def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
' Verify if overflown pinned ioclass is evicted '
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(100))
cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT, cache_line_size=cls)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
test_ioclass_id = 1
pinned_ioclass_id = 2
pinned_ioclass_max_occupancy = 10
cache.configure_partition(part_id=test_ioclass_id, name='default_ioclass', max_size=100, priority=1)
cache.configure_partition(part_id=pinned_ioclass_id, name='pinned_ioclass', max_size=pinned_ioclass_max_occupancy, priority=(- 1))
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size = cache.get_stats()['conf']['size']
data = Data(4096)
for i in range(cache_size.blocks_4k):
send_io(core, data, (i * 4096), test_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=test_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, cache_size.blocks_4k, abs_tol=Size(cls).blocks_4k), 'Failed to populate the default partition'
pinned_double_size = ceil((((cache_size.blocks_4k * pinned_ioclass_max_occupancy) * 2) / 100))
for i in range(pinned_double_size):
send_io(core, data, (i * 4096), pinned_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=pinned_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, pinned_double_size, abs_tol=Size(cls).blocks_4k), "Occupancy of pinned ioclass doesn't match expected value"
for i in range(cache_size.blocks_4k):
send_io(core, data, ((cache_size.blocks_4k + i) * 4096), test_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=pinned_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, ceil((cache_size.blocks_4k * 0.1)), abs_tol=Size(cls).blocks_4k), 'Overflown part has not been evicted'
|
Verify if overflown pinned ioclass is evicted
|
tests/functional/tests/eviction/test_eviction.py
|
test_evict_overflown_pinned
|
hammerg/ocf
| 0
|
python
|
@pytest.mark.parametrize('cls', CacheLineSize)
def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
' '
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(100))
cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT, cache_line_size=cls)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
test_ioclass_id = 1
pinned_ioclass_id = 2
pinned_ioclass_max_occupancy = 10
cache.configure_partition(part_id=test_ioclass_id, name='default_ioclass', max_size=100, priority=1)
cache.configure_partition(part_id=pinned_ioclass_id, name='pinned_ioclass', max_size=pinned_ioclass_max_occupancy, priority=(- 1))
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size = cache.get_stats()['conf']['size']
data = Data(4096)
for i in range(cache_size.blocks_4k):
send_io(core, data, (i * 4096), test_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=test_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, cache_size.blocks_4k, abs_tol=Size(cls).blocks_4k), 'Failed to populate the default partition'
pinned_double_size = ceil((((cache_size.blocks_4k * pinned_ioclass_max_occupancy) * 2) / 100))
for i in range(pinned_double_size):
send_io(core, data, (i * 4096), pinned_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=pinned_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, pinned_double_size, abs_tol=Size(cls).blocks_4k), "Occupancy of pinned ioclass doesn't match expected value"
for i in range(cache_size.blocks_4k):
send_io(core, data, ((cache_size.blocks_4k + i) * 4096), test_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=pinned_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, ceil((cache_size.blocks_4k * 0.1)), abs_tol=Size(cls).blocks_4k), 'Overflown part has not been evicted'
|
@pytest.mark.parametrize('cls', CacheLineSize)
def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
' '
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(100))
cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT, cache_line_size=cls)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
test_ioclass_id = 1
pinned_ioclass_id = 2
pinned_ioclass_max_occupancy = 10
cache.configure_partition(part_id=test_ioclass_id, name='default_ioclass', max_size=100, priority=1)
cache.configure_partition(part_id=pinned_ioclass_id, name='pinned_ioclass', max_size=pinned_ioclass_max_occupancy, priority=(- 1))
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size = cache.get_stats()['conf']['size']
data = Data(4096)
for i in range(cache_size.blocks_4k):
send_io(core, data, (i * 4096), test_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=test_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, cache_size.blocks_4k, abs_tol=Size(cls).blocks_4k), 'Failed to populate the default partition'
pinned_double_size = ceil((((cache_size.blocks_4k * pinned_ioclass_max_occupancy) * 2) / 100))
for i in range(pinned_double_size):
send_io(core, data, (i * 4096), pinned_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=pinned_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, pinned_double_size, abs_tol=Size(cls).blocks_4k), "Occupancy of pinned ioclass doesn't match expected value"
for i in range(cache_size.blocks_4k):
send_io(core, data, ((cache_size.blocks_4k + i) * 4096), test_ioclass_id)
part_current_size = CacheLines(cache.get_partition_info(part_id=pinned_ioclass_id)['_curr_size'], cls)
assert isclose(part_current_size.blocks_4k, ceil((cache_size.blocks_4k * 0.1)), abs_tol=Size(cls).blocks_4k), 'Overflown part has not been evicted'<|docstring|>Verify if overflown pinned ioclass is evicted<|endoftext|>
|
578852ec22467d4592c1683faac0edb12419dfa63e5e276ff1302e3080b6fbaa
|
@classmethod
def from_file(cls, file) -> 'CairoPie':
'\n Loads an instance of CairoPie from a file.\n file can be a path or a file object.\n '
with zipfile.ZipFile(file) as zf:
with zf.open(cls.METADATA_FILENAME, 'r') as fp:
metadata = CairoPieMetadata.Schema().load(json.loads(fp.read().decode('ascii')))
with zf.open(cls.MEMORY_FILENAME, 'r') as fp:
memory = MemoryDict.deserialize(data=fp.read(), field_bytes=metadata.field_bytes)
with zf.open(cls.ADDITIONAL_DATA_FILENAME, 'r') as fp:
additional_data = json.loads(fp.read().decode('ascii'))
with zf.open(cls.EXECUTION_RESOURCES_FILENAME, 'r') as fp:
execution_resources = ExecutionResources.Schema().load(json.loads(fp.read().decode('ascii')))
return CairoPie(metadata, memory, additional_data, execution_resources)
|
Loads an instance of CairoPie from a file.
file can be a path or a file object.
|
examples/starkex-cairo/starkware/cairo/lang/vm/cairo_pie.py
|
from_file
|
LatticeLabVentures/BeamNet
| 0
|
python
|
@classmethod
def from_file(cls, file) -> 'CairoPie':
'\n Loads an instance of CairoPie from a file.\n file can be a path or a file object.\n '
with zipfile.ZipFile(file) as zf:
with zf.open(cls.METADATA_FILENAME, 'r') as fp:
metadata = CairoPieMetadata.Schema().load(json.loads(fp.read().decode('ascii')))
with zf.open(cls.MEMORY_FILENAME, 'r') as fp:
memory = MemoryDict.deserialize(data=fp.read(), field_bytes=metadata.field_bytes)
with zf.open(cls.ADDITIONAL_DATA_FILENAME, 'r') as fp:
additional_data = json.loads(fp.read().decode('ascii'))
with zf.open(cls.EXECUTION_RESOURCES_FILENAME, 'r') as fp:
execution_resources = ExecutionResources.Schema().load(json.loads(fp.read().decode('ascii')))
return CairoPie(metadata, memory, additional_data, execution_resources)
|
@classmethod
def from_file(cls, file) -> 'CairoPie':
'\n Loads an instance of CairoPie from a file.\n file can be a path or a file object.\n '
with zipfile.ZipFile(file) as zf:
with zf.open(cls.METADATA_FILENAME, 'r') as fp:
metadata = CairoPieMetadata.Schema().load(json.loads(fp.read().decode('ascii')))
with zf.open(cls.MEMORY_FILENAME, 'r') as fp:
memory = MemoryDict.deserialize(data=fp.read(), field_bytes=metadata.field_bytes)
with zf.open(cls.ADDITIONAL_DATA_FILENAME, 'r') as fp:
additional_data = json.loads(fp.read().decode('ascii'))
with zf.open(cls.EXECUTION_RESOURCES_FILENAME, 'r') as fp:
execution_resources = ExecutionResources.Schema().load(json.loads(fp.read().decode('ascii')))
return CairoPie(metadata, memory, additional_data, execution_resources)<|docstring|>Loads an instance of CairoPie from a file.
file can be a path or a file object.<|endoftext|>
|
4bf5d174c777f1a865f98be9565cb7371a1b784b9da81791a484e0bad9bc5bf0
|
def main():
'\n Simple usage examples and tests.\n '
config = {'hosts': [('172.28.128.4', 3000)], 'policies': {'timeout': 1200}}
QUEUE_MAX_SIZE = 1000
ENTRIES_PER_REC = 100
client = aerospike.client(config).connect()
msg_queue = LargeQueue()
try:
msg_queue.initialize_existing_queue(client, 'test', 'shared-msg-bus')
except ASAborted as ex:
msg_queue.create_new_queue(client, 'test', 'shared-msg-bus', QUEUE_MAX_SIZE, ENTRIES_PER_REC)
txn_id = 1
msg = {'msg-id': 100, 'msg-text': 'The school is closed for summer.'}
try:
offset = msg_queue.enqueue(msg, txn_id)
print('message 100 added at offset {}'.format(offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
msg = {'msg-id': 101, 'msg-text': 'Have a nice summer!'}
try:
offset = msg_queue.enqueue(msg, txn_id)
print('message 101 added at offset {}'.format(offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
q_info = msg_queue.get_queue_info()
print('queue status: {}'.format(q_info))
offset = (q_info['head-offset'] + 1)
msg_entry = msg_queue.get_entry_at_offset(offset)
print('entry at offset {}: {}'.format(offset, msg_entry))
try:
msg_entry = msg_queue.dequeue(txn_id)
print('dequeued entry: {}'.format(msg_entry))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
q_info = msg_queue.get_queue_info()
print('end status: {}'.format(q_info))
client = aerospike.client(config).connect()
test_queue = LargeQueue()
try:
test_queue.initialize_existing_queue(client, 'test', 'test_queue')
except ASAborted as ex:
test_queue.create_new_queue(client, 'test', 'test_queue', 10, 3)
txn_id = 111
try:
entry = test_queue.dequeue(txn_id)
print('found: {}'.format(entry))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
entry = 999
try:
offset = test_queue.enqueue(msg, txn_id)
print('added: {} at offset: {}'.format(entry, offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
try:
out = test_queue.dequeue(txn_id)
print('dequeued: {}'.format(out))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
print('adding without overwrite')
for i in range(20):
try:
offset = test_queue.enqueue(i, txn_id)
print('added entry {} at offset {}'.format(i, offset))
except ASAborted as ex:
print('aborted: entry {}, reason: {}'.format(i, ex.reason))
print('adding with overwrite')
for i in range(20):
try:
offset = test_queue.enqueue(i, txn_id, True)
print('added entry {} at offset {}'.format(i, offset))
except ASAborted as ex:
print('aborted: entry {}, reason: {}'.format(i, ex.reason))
print('get info')
info = test_queue.get_queue_info()
print('info: {}'.format(info))
print('get entries at offset')
for i in range(info['head-offset'], info['tail-offset']):
entry = test_queue.get_entry_at_offset(i)
print('at offset {} got entry {}'.format(i, entry))
print('dequeue all entries')
while True:
try:
entry = test_queue.dequeue(txn_id)
print('dequeued entry: {}'.format(entry))
if (entry is None):
print('done')
break
except ASAborted as ex:
print('aborted: reason: {}'.format(ex.reason))
break
exit(0)
|
Simple usage examples and tests.
|
large_queue.py
|
main
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def main():
'\n \n '
config = {'hosts': [('172.28.128.4', 3000)], 'policies': {'timeout': 1200}}
QUEUE_MAX_SIZE = 1000
ENTRIES_PER_REC = 100
client = aerospike.client(config).connect()
msg_queue = LargeQueue()
try:
msg_queue.initialize_existing_queue(client, 'test', 'shared-msg-bus')
except ASAborted as ex:
msg_queue.create_new_queue(client, 'test', 'shared-msg-bus', QUEUE_MAX_SIZE, ENTRIES_PER_REC)
txn_id = 1
msg = {'msg-id': 100, 'msg-text': 'The school is closed for summer.'}
try:
offset = msg_queue.enqueue(msg, txn_id)
print('message 100 added at offset {}'.format(offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
msg = {'msg-id': 101, 'msg-text': 'Have a nice summer!'}
try:
offset = msg_queue.enqueue(msg, txn_id)
print('message 101 added at offset {}'.format(offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
q_info = msg_queue.get_queue_info()
print('queue status: {}'.format(q_info))
offset = (q_info['head-offset'] + 1)
msg_entry = msg_queue.get_entry_at_offset(offset)
print('entry at offset {}: {}'.format(offset, msg_entry))
try:
msg_entry = msg_queue.dequeue(txn_id)
print('dequeued entry: {}'.format(msg_entry))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
q_info = msg_queue.get_queue_info()
print('end status: {}'.format(q_info))
client = aerospike.client(config).connect()
test_queue = LargeQueue()
try:
test_queue.initialize_existing_queue(client, 'test', 'test_queue')
except ASAborted as ex:
test_queue.create_new_queue(client, 'test', 'test_queue', 10, 3)
txn_id = 111
try:
entry = test_queue.dequeue(txn_id)
print('found: {}'.format(entry))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
entry = 999
try:
offset = test_queue.enqueue(msg, txn_id)
print('added: {} at offset: {}'.format(entry, offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
try:
out = test_queue.dequeue(txn_id)
print('dequeued: {}'.format(out))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
print('adding without overwrite')
for i in range(20):
try:
offset = test_queue.enqueue(i, txn_id)
print('added entry {} at offset {}'.format(i, offset))
except ASAborted as ex:
print('aborted: entry {}, reason: {}'.format(i, ex.reason))
print('adding with overwrite')
for i in range(20):
try:
offset = test_queue.enqueue(i, txn_id, True)
print('added entry {} at offset {}'.format(i, offset))
except ASAborted as ex:
print('aborted: entry {}, reason: {}'.format(i, ex.reason))
print('get info')
info = test_queue.get_queue_info()
print('info: {}'.format(info))
print('get entries at offset')
for i in range(info['head-offset'], info['tail-offset']):
entry = test_queue.get_entry_at_offset(i)
print('at offset {} got entry {}'.format(i, entry))
print('dequeue all entries')
while True:
try:
entry = test_queue.dequeue(txn_id)
print('dequeued entry: {}'.format(entry))
if (entry is None):
print('done')
break
except ASAborted as ex:
print('aborted: reason: {}'.format(ex.reason))
break
exit(0)
|
def main():
'\n \n '
config = {'hosts': [('172.28.128.4', 3000)], 'policies': {'timeout': 1200}}
QUEUE_MAX_SIZE = 1000
ENTRIES_PER_REC = 100
client = aerospike.client(config).connect()
msg_queue = LargeQueue()
try:
msg_queue.initialize_existing_queue(client, 'test', 'shared-msg-bus')
except ASAborted as ex:
msg_queue.create_new_queue(client, 'test', 'shared-msg-bus', QUEUE_MAX_SIZE, ENTRIES_PER_REC)
txn_id = 1
msg = {'msg-id': 100, 'msg-text': 'The school is closed for summer.'}
try:
offset = msg_queue.enqueue(msg, txn_id)
print('message 100 added at offset {}'.format(offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
msg = {'msg-id': 101, 'msg-text': 'Have a nice summer!'}
try:
offset = msg_queue.enqueue(msg, txn_id)
print('message 101 added at offset {}'.format(offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
q_info = msg_queue.get_queue_info()
print('queue status: {}'.format(q_info))
offset = (q_info['head-offset'] + 1)
msg_entry = msg_queue.get_entry_at_offset(offset)
print('entry at offset {}: {}'.format(offset, msg_entry))
try:
msg_entry = msg_queue.dequeue(txn_id)
print('dequeued entry: {}'.format(msg_entry))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
q_info = msg_queue.get_queue_info()
print('end status: {}'.format(q_info))
client = aerospike.client(config).connect()
test_queue = LargeQueue()
try:
test_queue.initialize_existing_queue(client, 'test', 'test_queue')
except ASAborted as ex:
test_queue.create_new_queue(client, 'test', 'test_queue', 10, 3)
txn_id = 111
try:
entry = test_queue.dequeue(txn_id)
print('found: {}'.format(entry))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
entry = 999
try:
offset = test_queue.enqueue(msg, txn_id)
print('added: {} at offset: {}'.format(entry, offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
try:
out = test_queue.dequeue(txn_id)
print('dequeued: {}'.format(out))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
print('adding without overwrite')
for i in range(20):
try:
offset = test_queue.enqueue(i, txn_id)
print('added entry {} at offset {}'.format(i, offset))
except ASAborted as ex:
print('aborted: entry {}, reason: {}'.format(i, ex.reason))
print('adding with overwrite')
for i in range(20):
try:
offset = test_queue.enqueue(i, txn_id, True)
print('added entry {} at offset {}'.format(i, offset))
except ASAborted as ex:
print('aborted: entry {}, reason: {}'.format(i, ex.reason))
print('get info')
info = test_queue.get_queue_info()
print('info: {}'.format(info))
print('get entries at offset')
for i in range(info['head-offset'], info['tail-offset']):
entry = test_queue.get_entry_at_offset(i)
print('at offset {} got entry {}'.format(i, entry))
print('dequeue all entries')
while True:
try:
entry = test_queue.dequeue(txn_id)
print('dequeued entry: {}'.format(entry))
if (entry is None):
print('done')
break
except ASAborted as ex:
print('aborted: reason: {}'.format(ex.reason))
break
exit(0)<|docstring|>Simple usage examples and tests.<|endoftext|>
|
808c1f52d0124950a0f644764c1c63657b38d46ffdbcf569a555a19b505fecd9
|
def __init__(self):
'\n The null constructor.\n '
self.client = None
self.namespace = None
self.name = None
self.slots_per_rec = None
self.num_buf_recs = None
self.initialized = False
|
The null constructor.
|
large_queue.py
|
__init__
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def __init__(self):
'\n \n '
self.client = None
self.namespace = None
self.name = None
self.slots_per_rec = None
self.num_buf_recs = None
self.initialized = False
|
def __init__(self):
'\n \n '
self.client = None
self.namespace = None
self.name = None
self.slots_per_rec = None
self.num_buf_recs = None
self.initialized = False<|docstring|>The null constructor.<|endoftext|>
|
fa5d49693819bde1bbbb7bf3897680c27564c82092f3711c5fc47739065e0322
|
@staticmethod
def _get_metadata(client, namespace, q_name):
'\n Get the metadata record.\n :param client: client object returned by aerospike.connect()\n :param namespace: namespace where the queue records are stored\n :param q_name: name of the queue, used as the "set" name\n :return: metadata record if queue exists, otherwise None\n '
metadata_key = (namespace, q_name, LargeQueue.META_REC_KEY)
try:
(key, meta, record) = client.get(metadata_key)
except exception.RecordNotFound as ex:
return None
return record
|
Get the metadata record.
:param client: client object returned by aerospike.connect()
:param namespace: namespace where the queue records are stored
:param q_name: name of the queue, used as the "set" name
:return: metadata record if queue exists, otherwise None
|
large_queue.py
|
_get_metadata
|
neelp-git/aerospike-large-queue
| 2
|
python
|
@staticmethod
def _get_metadata(client, namespace, q_name):
'\n Get the metadata record.\n :param client: client object returned by aerospike.connect()\n :param namespace: namespace where the queue records are stored\n :param q_name: name of the queue, used as the "set" name\n :return: metadata record if queue exists, otherwise None\n '
metadata_key = (namespace, q_name, LargeQueue.META_REC_KEY)
try:
(key, meta, record) = client.get(metadata_key)
except exception.RecordNotFound as ex:
return None
return record
|
@staticmethod
def _get_metadata(client, namespace, q_name):
'\n Get the metadata record.\n :param client: client object returned by aerospike.connect()\n :param namespace: namespace where the queue records are stored\n :param q_name: name of the queue, used as the "set" name\n :return: metadata record if queue exists, otherwise None\n '
metadata_key = (namespace, q_name, LargeQueue.META_REC_KEY)
try:
(key, meta, record) = client.get(metadata_key)
except exception.RecordNotFound as ex:
return None
return record<|docstring|>Get the metadata record.
:param client: client object returned by aerospike.connect()
:param namespace: namespace where the queue records are stored
:param q_name: name of the queue, used as the "set" name
:return: metadata record if queue exists, otherwise None<|endoftext|>
|
e1b209dd98157f2d2a7789c0430b63d355313ff323024a32681d4de24d9e6fb0
|
def get_queue_info(self):
'\n Get queue info.\n :return: a dict with externally visible attributes of the queue\n '
if (not self.initialized):
return None
record = LargeQueue._get_metadata(self.client, self.namespace, self.name)
return {'name': self.name, 'max-size': (self.num_buf_recs * self.slots_per_rec), 'namespace': self.namespace, 'head-offset': long(record['head-offset']), 'tail-offset': long(record['tail-offset'])}
|
Get queue info.
:return: a dict with externally visible attributes of the queue
|
large_queue.py
|
get_queue_info
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def get_queue_info(self):
'\n Get queue info.\n :return: a dict with externally visible attributes of the queue\n '
if (not self.initialized):
return None
record = LargeQueue._get_metadata(self.client, self.namespace, self.name)
return {'name': self.name, 'max-size': (self.num_buf_recs * self.slots_per_rec), 'namespace': self.namespace, 'head-offset': long(record['head-offset']), 'tail-offset': long(record['tail-offset'])}
|
def get_queue_info(self):
'\n Get queue info.\n :return: a dict with externally visible attributes of the queue\n '
if (not self.initialized):
return None
record = LargeQueue._get_metadata(self.client, self.namespace, self.name)
return {'name': self.name, 'max-size': (self.num_buf_recs * self.slots_per_rec), 'namespace': self.namespace, 'head-offset': long(record['head-offset']), 'tail-offset': long(record['tail-offset'])}<|docstring|>Get queue info.
:return: a dict with externally visible attributes of the queue<|endoftext|>
|
d4102f75997c17b9f5739534a9cbab8870b5456625f5ad4e0ca5b2bebb94205e
|
def _create_metadata_record(self):
"\n Create a metadata record for a new queue.\n :throws: ASAborted('Queue already exists')\n "
write_policy = {'exists': aerospike.POLICY_EXISTS_CREATE, 'key': aerospike.POLICY_KEY_SEND}
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
metadata_bins = {'locked': 0, 'lock-owner': None, 'lock-time-ms': None, 'head-offset': 0, 'tail-offset': 0, 'fencing-ctr': 0, 'num-buf-recs': self.num_buf_recs, 'slots-per-rec': self.slots_per_rec}
try:
self.client.put(metadata_key, metadata_bins, write_policy)
except exception.RecordExistsError as ex:
raise ASAborted('Queue already exists')
return
|
Create a metadata record for a new queue.
:throws: ASAborted('Queue already exists')
|
large_queue.py
|
_create_metadata_record
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def _create_metadata_record(self):
"\n Create a metadata record for a new queue.\n :throws: ASAborted('Queue already exists')\n "
write_policy = {'exists': aerospike.POLICY_EXISTS_CREATE, 'key': aerospike.POLICY_KEY_SEND}
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
metadata_bins = {'locked': 0, 'lock-owner': None, 'lock-time-ms': None, 'head-offset': 0, 'tail-offset': 0, 'fencing-ctr': 0, 'num-buf-recs': self.num_buf_recs, 'slots-per-rec': self.slots_per_rec}
try:
self.client.put(metadata_key, metadata_bins, write_policy)
except exception.RecordExistsError as ex:
raise ASAborted('Queue already exists')
return
|
def _create_metadata_record(self):
"\n Create a metadata record for a new queue.\n :throws: ASAborted('Queue already exists')\n "
write_policy = {'exists': aerospike.POLICY_EXISTS_CREATE, 'key': aerospike.POLICY_KEY_SEND}
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
metadata_bins = {'locked': 0, 'lock-owner': None, 'lock-time-ms': None, 'head-offset': 0, 'tail-offset': 0, 'fencing-ctr': 0, 'num-buf-recs': self.num_buf_recs, 'slots-per-rec': self.slots_per_rec}
try:
self.client.put(metadata_key, metadata_bins, write_policy)
except exception.RecordExistsError as ex:
raise ASAborted('Queue already exists')
return<|docstring|>Create a metadata record for a new queue.
:throws: ASAborted('Queue already exists')<|endoftext|>
|
e6e6d2e10719e2a327824f3c0ac76baf978d7bf34d2dcd8482eb3a7f7dc4623c
|
def _create_buf_records(self):
'\n Create buffer records for a new queue.\n '
write_policy = {'exists': aerospike.POLICY_EXISTS_CREATE_OR_REPLACE, 'key': aerospike.POLICY_KEY_SEND}
buf_bins = {'fencing-mark': 0, 'entries': []}
for i in range(self.slots_per_rec):
buf_bins['entries'].append({'offset': (- 1), 'value': None})
for i in range(self.num_buf_recs):
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(i))
_ = self.client.put(buf_key, buf_bins, write_policy)
return
|
Create buffer records for a new queue.
|
large_queue.py
|
_create_buf_records
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def _create_buf_records(self):
'\n \n '
write_policy = {'exists': aerospike.POLICY_EXISTS_CREATE_OR_REPLACE, 'key': aerospike.POLICY_KEY_SEND}
buf_bins = {'fencing-mark': 0, 'entries': []}
for i in range(self.slots_per_rec):
buf_bins['entries'].append({'offset': (- 1), 'value': None})
for i in range(self.num_buf_recs):
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(i))
_ = self.client.put(buf_key, buf_bins, write_policy)
return
|
def _create_buf_records(self):
'\n \n '
write_policy = {'exists': aerospike.POLICY_EXISTS_CREATE_OR_REPLACE, 'key': aerospike.POLICY_KEY_SEND}
buf_bins = {'fencing-mark': 0, 'entries': []}
for i in range(self.slots_per_rec):
buf_bins['entries'].append({'offset': (- 1), 'value': None})
for i in range(self.num_buf_recs):
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(i))
_ = self.client.put(buf_key, buf_bins, write_policy)
return<|docstring|>Create buffer records for a new queue.<|endoftext|>
|
4dd7b9125008f263e54b32a63fb5de38baab5241034f64417c2d56e9ce32b82a
|
def _reset_fencing_marks(self):
'\n Reset the fencing marker in buffer and metadata records when the fencing counter in metadata record wraps\n around to a non-positive value. While like to be very infrequent, if at all necessary, operation\n (a long fencing counter should make it unnecessary), it is critical for it to succeed.\n If it fails for some reason, enqueue operations will fail due to fencing error until the fencing marker is\n reset.\n '
write_policy = {'exists': aerospike.POLICY_EXISTS_UPDATE}
try:
for i in range(self.num_buf_recs):
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(i))
self.client.put(buf_key, {'fencing_mark': 0}, write_policy)
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
self.client.put(metadata_key, {'fencing-ctr': 0}, write_policy)
except ex:
print('LargeQueue: critical error. Failure during reset of fencing marks', ex)
raise ex
return
|
Reset the fencing marker in buffer and metadata records when the fencing counter in metadata record wraps
around to a non-positive value. While like to be very infrequent, if at all necessary, operation
(a long fencing counter should make it unnecessary), it is critical for it to succeed.
If it fails for some reason, enqueue operations will fail due to fencing error until the fencing marker is
reset.
|
large_queue.py
|
_reset_fencing_marks
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def _reset_fencing_marks(self):
'\n Reset the fencing marker in buffer and metadata records when the fencing counter in metadata record wraps\n around to a non-positive value. While like to be very infrequent, if at all necessary, operation\n (a long fencing counter should make it unnecessary), it is critical for it to succeed.\n If it fails for some reason, enqueue operations will fail due to fencing error until the fencing marker is\n reset.\n '
write_policy = {'exists': aerospike.POLICY_EXISTS_UPDATE}
try:
for i in range(self.num_buf_recs):
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(i))
self.client.put(buf_key, {'fencing_mark': 0}, write_policy)
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
self.client.put(metadata_key, {'fencing-ctr': 0}, write_policy)
except ex:
print('LargeQueue: critical error. Failure during reset of fencing marks', ex)
raise ex
return
|
def _reset_fencing_marks(self):
'\n Reset the fencing marker in buffer and metadata records when the fencing counter in metadata record wraps\n around to a non-positive value. While like to be very infrequent, if at all necessary, operation\n (a long fencing counter should make it unnecessary), it is critical for it to succeed.\n If it fails for some reason, enqueue operations will fail due to fencing error until the fencing marker is\n reset.\n '
write_policy = {'exists': aerospike.POLICY_EXISTS_UPDATE}
try:
for i in range(self.num_buf_recs):
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(i))
self.client.put(buf_key, {'fencing_mark': 0}, write_policy)
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
self.client.put(metadata_key, {'fencing-ctr': 0}, write_policy)
except ex:
print('LargeQueue: critical error. Failure during reset of fencing marks', ex)
raise ex
return<|docstring|>Reset the fencing marker in buffer and metadata records when the fencing counter in metadata record wraps
around to a non-positive value. While like to be very infrequent, if at all necessary, operation
(a long fencing counter should make it unnecessary), it is critical for it to succeed.
If it fails for some reason, enqueue operations will fail due to fencing error until the fencing marker is
reset.<|endoftext|>
|
2f0ecf3ee6891c6993c51222a2a1b92f169762ed7181674c16cbb6d9b98dc0fa
|
def create_new_queue(self, client, namespace, q_name, max_size, slots_per_rec):
'\n Create a new queue using the input parameters.\n :param client: client object returned by aerospike.connect()\n :param namespace: namespace in which the queue records are to be stored\n :param q_name: name of the queue, used as the "set" name\n :param max_size: maximum number of entries to be held in the queue\n :param slots_per_rec: number of entries per record, depending on the size of entry. must be carefully\n selected otherwise record overflow can result at runtime.\n '
self.client = client
self.namespace = namespace
self.name = q_name
self.slots_per_rec = slots_per_rec
self.num_buf_recs = (((max_size + self.slots_per_rec) - 1) / self.slots_per_rec)
self._create_metadata_record()
self._create_buf_records()
self.initialized = True
return
|
Create a new queue using the input parameters.
:param client: client object returned by aerospike.connect()
:param namespace: namespace in which the queue records are to be stored
:param q_name: name of the queue, used as the "set" name
:param max_size: maximum number of entries to be held in the queue
:param slots_per_rec: number of entries per record, depending on the size of entry. must be carefully
selected otherwise record overflow can result at runtime.
|
large_queue.py
|
create_new_queue
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def create_new_queue(self, client, namespace, q_name, max_size, slots_per_rec):
'\n Create a new queue using the input parameters.\n :param client: client object returned by aerospike.connect()\n :param namespace: namespace in which the queue records are to be stored\n :param q_name: name of the queue, used as the "set" name\n :param max_size: maximum number of entries to be held in the queue\n :param slots_per_rec: number of entries per record, depending on the size of entry. must be carefully\n selected otherwise record overflow can result at runtime.\n '
self.client = client
self.namespace = namespace
self.name = q_name
self.slots_per_rec = slots_per_rec
self.num_buf_recs = (((max_size + self.slots_per_rec) - 1) / self.slots_per_rec)
self._create_metadata_record()
self._create_buf_records()
self.initialized = True
return
|
def create_new_queue(self, client, namespace, q_name, max_size, slots_per_rec):
'\n Create a new queue using the input parameters.\n :param client: client object returned by aerospike.connect()\n :param namespace: namespace in which the queue records are to be stored\n :param q_name: name of the queue, used as the "set" name\n :param max_size: maximum number of entries to be held in the queue\n :param slots_per_rec: number of entries per record, depending on the size of entry. must be carefully\n selected otherwise record overflow can result at runtime.\n '
self.client = client
self.namespace = namespace
self.name = q_name
self.slots_per_rec = slots_per_rec
self.num_buf_recs = (((max_size + self.slots_per_rec) - 1) / self.slots_per_rec)
self._create_metadata_record()
self._create_buf_records()
self.initialized = True
return<|docstring|>Create a new queue using the input parameters.
:param client: client object returned by aerospike.connect()
:param namespace: namespace in which the queue records are to be stored
:param q_name: name of the queue, used as the "set" name
:param max_size: maximum number of entries to be held in the queue
:param slots_per_rec: number of entries per record, depending on the size of entry. must be carefully
selected otherwise record overflow can result at runtime.<|endoftext|>
|
834af888719a5ad2d5a8df1a84b9270e3874b440e28bf6ae9410a9985ee9ac4b
|
def initialize_existing_queue(self, client, namespace, q_name):
'\n Initialize an existing queue in the given namespace with the given name.\n :param client: client object returned by aerospike.connect().\n :param namespace: namespace in which the queue is stored\n :param q_name: name of the queue\n '
metadata = LargeQueue._get_metadata(client, namespace, q_name)
if (metadata is None):
raise ASAborted('Queue does not exist')
self.client = client
self.namespace = namespace
self.name = q_name
self.slots_per_rec = metadata['slots-per-rec']
self.num_buf_recs = metadata['num-buf-recs']
self.initialized = True
return
|
Initialize an existing queue in the given namespace with the given name.
:param client: client object returned by aerospike.connect().
:param namespace: namespace in which the queue is stored
:param q_name: name of the queue
|
large_queue.py
|
initialize_existing_queue
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def initialize_existing_queue(self, client, namespace, q_name):
'\n Initialize an existing queue in the given namespace with the given name.\n :param client: client object returned by aerospike.connect().\n :param namespace: namespace in which the queue is stored\n :param q_name: name of the queue\n '
metadata = LargeQueue._get_metadata(client, namespace, q_name)
if (metadata is None):
raise ASAborted('Queue does not exist')
self.client = client
self.namespace = namespace
self.name = q_name
self.slots_per_rec = metadata['slots-per-rec']
self.num_buf_recs = metadata['num-buf-recs']
self.initialized = True
return
|
def initialize_existing_queue(self, client, namespace, q_name):
'\n Initialize an existing queue in the given namespace with the given name.\n :param client: client object returned by aerospike.connect().\n :param namespace: namespace in which the queue is stored\n :param q_name: name of the queue\n '
metadata = LargeQueue._get_metadata(client, namespace, q_name)
if (metadata is None):
raise ASAborted('Queue does not exist')
self.client = client
self.namespace = namespace
self.name = q_name
self.slots_per_rec = metadata['slots-per-rec']
self.num_buf_recs = metadata['num-buf-recs']
self.initialized = True
return<|docstring|>Initialize an existing queue in the given namespace with the given name.
:param client: client object returned by aerospike.connect().
:param namespace: namespace in which the queue is stored
:param q_name: name of the queue<|endoftext|>
|
141e0960f73344c5510e41ede927621198ee794c2c9d4bf25e28f6258c872071
|
def _lock(self, txn_id, op):
"\n Atomically check if the queue is locked, break an expired lock, lock the queue and\n set the lock-owner and lock-time, and if the operation is enqueue, also increment and\n return the fencing counter.\n Try multiple times if the lock is not available, and wait before subsequent attempt.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param op: enqueue or dequeue\n :return: dict with head and tail positions on success\n throws ASAborted('Failed to acquire lock') on failure\n "
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
for _ in range(LargeQueue.LOCK_MAX_RETRIES):
curr_time_ms = LargeQueue._curr_time_milliseconds()
predexps = [predexp.integer_bin('locked'), predexp.integer_value(0), predexp.integer_equal(), predexp.integer_bin('lock-time-ms'), predexp.integer_value((curr_time_ms - LargeQueue.LOCK_EXPIRATION_MS)), predexp.integer_less(), predexp.predexp_or(2)]
ops = [op_helpers.read('head-offset'), op_helpers.read('tail-offset'), op_helpers.write('locked', 1), op_helpers.write('lock-owner', txn_id), op_helpers.write('lock-time-ms', curr_time_ms)]
if (op == LargeQueue.Ops.Enqueue):
ops.append(op_helpers.increment('fencing-ctr', 1))
ops.append(op_helpers.read('fencing-ctr'))
try:
(_, _, record) = self.client.operate(metadata_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
time.sleep((LargeQueue.LOCK_POLL_WAIT_MS / 1000.0))
continue
return record
raise ASAborted('Failed to acquire lock')
|
Atomically check if the queue is locked, break an expired lock, lock the queue and
set the lock-owner and lock-time, and if the operation is enqueue, also increment and
return the fencing counter.
Try multiple times if the lock is not available, and wait before subsequent attempt.
:param txn_id: lock owner id, must be unique among concurrent requests
:param op: enqueue or dequeue
:return: dict with head and tail positions on success
throws ASAborted('Failed to acquire lock') on failure
|
large_queue.py
|
_lock
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def _lock(self, txn_id, op):
"\n Atomically check if the queue is locked, break an expired lock, lock the queue and\n set the lock-owner and lock-time, and if the operation is enqueue, also increment and\n return the fencing counter.\n Try multiple times if the lock is not available, and wait before subsequent attempt.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param op: enqueue or dequeue\n :return: dict with head and tail positions on success\n throws ASAborted('Failed to acquire lock') on failure\n "
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
for _ in range(LargeQueue.LOCK_MAX_RETRIES):
curr_time_ms = LargeQueue._curr_time_milliseconds()
predexps = [predexp.integer_bin('locked'), predexp.integer_value(0), predexp.integer_equal(), predexp.integer_bin('lock-time-ms'), predexp.integer_value((curr_time_ms - LargeQueue.LOCK_EXPIRATION_MS)), predexp.integer_less(), predexp.predexp_or(2)]
ops = [op_helpers.read('head-offset'), op_helpers.read('tail-offset'), op_helpers.write('locked', 1), op_helpers.write('lock-owner', txn_id), op_helpers.write('lock-time-ms', curr_time_ms)]
if (op == LargeQueue.Ops.Enqueue):
ops.append(op_helpers.increment('fencing-ctr', 1))
ops.append(op_helpers.read('fencing-ctr'))
try:
(_, _, record) = self.client.operate(metadata_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
time.sleep((LargeQueue.LOCK_POLL_WAIT_MS / 1000.0))
continue
return record
raise ASAborted('Failed to acquire lock')
|
def _lock(self, txn_id, op):
"\n Atomically check if the queue is locked, break an expired lock, lock the queue and\n set the lock-owner and lock-time, and if the operation is enqueue, also increment and\n return the fencing counter.\n Try multiple times if the lock is not available, and wait before subsequent attempt.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param op: enqueue or dequeue\n :return: dict with head and tail positions on success\n throws ASAborted('Failed to acquire lock') on failure\n "
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
for _ in range(LargeQueue.LOCK_MAX_RETRIES):
curr_time_ms = LargeQueue._curr_time_milliseconds()
predexps = [predexp.integer_bin('locked'), predexp.integer_value(0), predexp.integer_equal(), predexp.integer_bin('lock-time-ms'), predexp.integer_value((curr_time_ms - LargeQueue.LOCK_EXPIRATION_MS)), predexp.integer_less(), predexp.predexp_or(2)]
ops = [op_helpers.read('head-offset'), op_helpers.read('tail-offset'), op_helpers.write('locked', 1), op_helpers.write('lock-owner', txn_id), op_helpers.write('lock-time-ms', curr_time_ms)]
if (op == LargeQueue.Ops.Enqueue):
ops.append(op_helpers.increment('fencing-ctr', 1))
ops.append(op_helpers.read('fencing-ctr'))
try:
(_, _, record) = self.client.operate(metadata_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
time.sleep((LargeQueue.LOCK_POLL_WAIT_MS / 1000.0))
continue
return record
raise ASAborted('Failed to acquire lock')<|docstring|>Atomically check if the queue is locked, break an expired lock, lock the queue and
set the lock-owner and lock-time, and if the operation is enqueue, also increment and
return the fencing counter.
Try multiple times if the lock is not available, and wait before subsequent attempt.
:param txn_id: lock owner id, must be unique among concurrent requests
:param op: enqueue or dequeue
:return: dict with head and tail positions on success
throws ASAborted('Failed to acquire lock') on failure<|endoftext|>
|
8e5a3e3a83b73a5285ac7d4810c40fdae7da6611dfb516b0a6008a4db6b754f9
|
def _commit_release(self, txn_id, new_head_offset=None, new_tail_offset=None):
"\n If the lock is still held by this requester (txn-id), update the new positions of head/tail and r\n elease the lock. Otherwise abort the request as timed out.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param new_head_offset: new head offset to be updated\n :param new_tail_offset: new tail offset to be updated\n :return: throws ASAborted('Timed out')\n "
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
predexps = [predexp.integer_bin('locked'), predexp.integer_value(1), predexp.integer_equal(), predexp.integer_bin('lock-owner'), predexp.integer_value(txn_id), predexp.integer_equal(), predexp.predexp_and(2)]
ops = [op_helpers.write('locked', 0), op_helpers.write('lock-owner', None), op_helpers.write('lock-time-ms', None)]
if (new_head_offset is not None):
ops.append(op_helpers.write('head-offset', new_head_offset))
if (new_tail_offset is not None):
ops.append(op_helpers.write('tail-offset', new_tail_offset))
try:
_ = self.client.operate(metadata_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
raise ASAborted('Timed out')
return
|
If the lock is still held by this requester (txn-id), update the new positions of head/tail and r
elease the lock. Otherwise abort the request as timed out.
:param txn_id: lock owner id, must be unique among concurrent requests
:param new_head_offset: new head offset to be updated
:param new_tail_offset: new tail offset to be updated
:return: throws ASAborted('Timed out')
|
large_queue.py
|
_commit_release
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def _commit_release(self, txn_id, new_head_offset=None, new_tail_offset=None):
"\n If the lock is still held by this requester (txn-id), update the new positions of head/tail and r\n elease the lock. Otherwise abort the request as timed out.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param new_head_offset: new head offset to be updated\n :param new_tail_offset: new tail offset to be updated\n :return: throws ASAborted('Timed out')\n "
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
predexps = [predexp.integer_bin('locked'), predexp.integer_value(1), predexp.integer_equal(), predexp.integer_bin('lock-owner'), predexp.integer_value(txn_id), predexp.integer_equal(), predexp.predexp_and(2)]
ops = [op_helpers.write('locked', 0), op_helpers.write('lock-owner', None), op_helpers.write('lock-time-ms', None)]
if (new_head_offset is not None):
ops.append(op_helpers.write('head-offset', new_head_offset))
if (new_tail_offset is not None):
ops.append(op_helpers.write('tail-offset', new_tail_offset))
try:
_ = self.client.operate(metadata_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
raise ASAborted('Timed out')
return
|
def _commit_release(self, txn_id, new_head_offset=None, new_tail_offset=None):
"\n If the lock is still held by this requester (txn-id), update the new positions of head/tail and r\n elease the lock. Otherwise abort the request as timed out.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param new_head_offset: new head offset to be updated\n :param new_tail_offset: new tail offset to be updated\n :return: throws ASAborted('Timed out')\n "
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
predexps = [predexp.integer_bin('locked'), predexp.integer_value(1), predexp.integer_equal(), predexp.integer_bin('lock-owner'), predexp.integer_value(txn_id), predexp.integer_equal(), predexp.predexp_and(2)]
ops = [op_helpers.write('locked', 0), op_helpers.write('lock-owner', None), op_helpers.write('lock-time-ms', None)]
if (new_head_offset is not None):
ops.append(op_helpers.write('head-offset', new_head_offset))
if (new_tail_offset is not None):
ops.append(op_helpers.write('tail-offset', new_tail_offset))
try:
_ = self.client.operate(metadata_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
raise ASAborted('Timed out')
return<|docstring|>If the lock is still held by this requester (txn-id), update the new positions of head/tail and r
elease the lock. Otherwise abort the request as timed out.
:param txn_id: lock owner id, must be unique among concurrent requests
:param new_head_offset: new head offset to be updated
:param new_tail_offset: new tail offset to be updated
:return: throws ASAborted('Timed out')<|endoftext|>
|
1e2f6416f7f4969816225e87bc81d4bf60fb986a8f00661dc2177f6392c6e761
|
def _get_entry_location(self, entry_offset):
"\n Get the record index and entry index within the record, given the entry's offset.\n :param entry_offset: offset of the entry\n :return: tuple (record index, entry index)\n "
buf_rec_index = (int((entry_offset / self.slots_per_rec)) % self.num_buf_recs)
entry_index = (entry_offset % self.slots_per_rec)
return (buf_rec_index, entry_index)
|
Get the record index and entry index within the record, given the entry's offset.
:param entry_offset: offset of the entry
:return: tuple (record index, entry index)
|
large_queue.py
|
_get_entry_location
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def _get_entry_location(self, entry_offset):
"\n Get the record index and entry index within the record, given the entry's offset.\n :param entry_offset: offset of the entry\n :return: tuple (record index, entry index)\n "
buf_rec_index = (int((entry_offset / self.slots_per_rec)) % self.num_buf_recs)
entry_index = (entry_offset % self.slots_per_rec)
return (buf_rec_index, entry_index)
|
def _get_entry_location(self, entry_offset):
"\n Get the record index and entry index within the record, given the entry's offset.\n :param entry_offset: offset of the entry\n :return: tuple (record index, entry index)\n "
buf_rec_index = (int((entry_offset / self.slots_per_rec)) % self.num_buf_recs)
entry_index = (entry_offset % self.slots_per_rec)
return (buf_rec_index, entry_index)<|docstring|>Get the record index and entry index within the record, given the entry's offset.
:param entry_offset: offset of the entry
:return: tuple (record index, entry index)<|endoftext|>
|
6e1a827973e21418b42d8ec1113616e7b81155c97da8713793d65f8bfe6fe173
|
def _queue_is_full(self, head_offset, tail_offset):
'\n Check if the queue is full.\n :param head_offset: Offset of the head entry.\n :param tail_offset: Offset of the tail entry (next added entry).\n :return: True if full, False otherwise\n '
num_entries = (tail_offset - head_offset)
return (num_entries == (self.num_buf_recs * self.slots_per_rec))
|
Check if the queue is full.
:param head_offset: Offset of the head entry.
:param tail_offset: Offset of the tail entry (next added entry).
:return: True if full, False otherwise
|
large_queue.py
|
_queue_is_full
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def _queue_is_full(self, head_offset, tail_offset):
'\n Check if the queue is full.\n :param head_offset: Offset of the head entry.\n :param tail_offset: Offset of the tail entry (next added entry).\n :return: True if full, False otherwise\n '
num_entries = (tail_offset - head_offset)
return (num_entries == (self.num_buf_recs * self.slots_per_rec))
|
def _queue_is_full(self, head_offset, tail_offset):
'\n Check if the queue is full.\n :param head_offset: Offset of the head entry.\n :param tail_offset: Offset of the tail entry (next added entry).\n :return: True if full, False otherwise\n '
num_entries = (tail_offset - head_offset)
return (num_entries == (self.num_buf_recs * self.slots_per_rec))<|docstring|>Check if the queue is full.
:param head_offset: Offset of the head entry.
:param tail_offset: Offset of the tail entry (next added entry).
:return: True if full, False otherwise<|endoftext|>
|
f21d99fb52d44a24d1c77bdf34f6d4cc62582c511b115cf4d30503c38f0d09a6
|
def _queue_is_empty(self, head_offset, tail_offset):
'\n Check if the queue is empty.\n :param head_offset: Offset of the head entry.\n :param tail_offset: Offset of the tail entry (next added entry).\n :return: True if empty, False otherwise\n '
return (0 == (tail_offset - head_offset))
|
Check if the queue is empty.
:param head_offset: Offset of the head entry.
:param tail_offset: Offset of the tail entry (next added entry).
:return: True if empty, False otherwise
|
large_queue.py
|
_queue_is_empty
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def _queue_is_empty(self, head_offset, tail_offset):
'\n Check if the queue is empty.\n :param head_offset: Offset of the head entry.\n :param tail_offset: Offset of the tail entry (next added entry).\n :return: True if empty, False otherwise\n '
return (0 == (tail_offset - head_offset))
|
def _queue_is_empty(self, head_offset, tail_offset):
'\n Check if the queue is empty.\n :param head_offset: Offset of the head entry.\n :param tail_offset: Offset of the tail entry (next added entry).\n :return: True if empty, False otherwise\n '
return (0 == (tail_offset - head_offset))<|docstring|>Check if the queue is empty.
:param head_offset: Offset of the head entry.
:param tail_offset: Offset of the tail entry (next added entry).
:return: True if empty, False otherwise<|endoftext|>
|
e6c11911baa81f015ccfffc59281708d1ee8ae59e5541aa09d7e1a7a49152630
|
def enqueue(self, entry, txn_id, overwrite_if_full=False):
"\n Append a new entry to the queue. Fails if the queue lock cannot be acquired. Can fail if the queue is full.\n If the fencing counter has wrapped around, reset all fencing values.\n :param entry: new entry to be enqueued\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param overwrite_if_full: flag indicating if the head position should be overwritten if the queue is full\n :return: Offset position of the enqueued entry. throws: ASAborted('Queue is full'), ASAborted('Timed out')\n "
q_state = self._lock(txn_id, LargeQueue.Ops.Enqueue)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
fencing_ctr = q_state['fencing-ctr']
if (fencing_ctr <= 0):
self._reset_fencing_marks()
(buf_rec_index, entry_index) = self._get_entry_location(tail_offset)
entry_val = {'offset': tail_offset, 'entry': entry}
queue_is_full = self._queue_is_full(head_offset, tail_offset)
if (queue_is_full and (not overwrite_if_full)):
self._commit_release(txn_id)
raise ASAborted('Queue is full')
predexps = [predexp.integer_bin('fencing-mark'), predexp.integer_value(fencing_ctr), predexp.integer_less()]
ops = [op_helpers.write('fencing-mark', fencing_ctr), list_helpers.list_set('entries', entry_index, entry_val)]
buf_rec_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
try:
(_, _, record) = self.client.operate(buf_rec_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
raise ASAborted('Timed out')
self._commit_release(txn_id, new_head_offset=((head_offset + 1) if queue_is_full else None), new_tail_offset=(tail_offset + 1))
return tail_offset
|
Append a new entry to the queue. Fails if the queue lock cannot be acquired. Can fail if the queue is full.
If the fencing counter has wrapped around, reset all fencing values.
:param entry: new entry to be enqueued
:param txn_id: lock owner id, must be unique among concurrent requests
:param overwrite_if_full: flag indicating if the head position should be overwritten if the queue is full
:return: Offset position of the enqueued entry. throws: ASAborted('Queue is full'), ASAborted('Timed out')
|
large_queue.py
|
enqueue
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def enqueue(self, entry, txn_id, overwrite_if_full=False):
"\n Append a new entry to the queue. Fails if the queue lock cannot be acquired. Can fail if the queue is full.\n If the fencing counter has wrapped around, reset all fencing values.\n :param entry: new entry to be enqueued\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param overwrite_if_full: flag indicating if the head position should be overwritten if the queue is full\n :return: Offset position of the enqueued entry. throws: ASAborted('Queue is full'), ASAborted('Timed out')\n "
q_state = self._lock(txn_id, LargeQueue.Ops.Enqueue)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
fencing_ctr = q_state['fencing-ctr']
if (fencing_ctr <= 0):
self._reset_fencing_marks()
(buf_rec_index, entry_index) = self._get_entry_location(tail_offset)
entry_val = {'offset': tail_offset, 'entry': entry}
queue_is_full = self._queue_is_full(head_offset, tail_offset)
if (queue_is_full and (not overwrite_if_full)):
self._commit_release(txn_id)
raise ASAborted('Queue is full')
predexps = [predexp.integer_bin('fencing-mark'), predexp.integer_value(fencing_ctr), predexp.integer_less()]
ops = [op_helpers.write('fencing-mark', fencing_ctr), list_helpers.list_set('entries', entry_index, entry_val)]
buf_rec_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
try:
(_, _, record) = self.client.operate(buf_rec_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
raise ASAborted('Timed out')
self._commit_release(txn_id, new_head_offset=((head_offset + 1) if queue_is_full else None), new_tail_offset=(tail_offset + 1))
return tail_offset
|
def enqueue(self, entry, txn_id, overwrite_if_full=False):
"\n Append a new entry to the queue. Fails if the queue lock cannot be acquired. Can fail if the queue is full.\n If the fencing counter has wrapped around, reset all fencing values.\n :param entry: new entry to be enqueued\n :param txn_id: lock owner id, must be unique among concurrent requests\n :param overwrite_if_full: flag indicating if the head position should be overwritten if the queue is full\n :return: Offset position of the enqueued entry. throws: ASAborted('Queue is full'), ASAborted('Timed out')\n "
q_state = self._lock(txn_id, LargeQueue.Ops.Enqueue)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
fencing_ctr = q_state['fencing-ctr']
if (fencing_ctr <= 0):
self._reset_fencing_marks()
(buf_rec_index, entry_index) = self._get_entry_location(tail_offset)
entry_val = {'offset': tail_offset, 'entry': entry}
queue_is_full = self._queue_is_full(head_offset, tail_offset)
if (queue_is_full and (not overwrite_if_full)):
self._commit_release(txn_id)
raise ASAborted('Queue is full')
predexps = [predexp.integer_bin('fencing-mark'), predexp.integer_value(fencing_ctr), predexp.integer_less()]
ops = [op_helpers.write('fencing-mark', fencing_ctr), list_helpers.list_set('entries', entry_index, entry_val)]
buf_rec_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
try:
(_, _, record) = self.client.operate(buf_rec_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
raise ASAborted('Timed out')
self._commit_release(txn_id, new_head_offset=((head_offset + 1) if queue_is_full else None), new_tail_offset=(tail_offset + 1))
return tail_offset<|docstring|>Append a new entry to the queue. Fails if the queue lock cannot be acquired. Can fail if the queue is full.
If the fencing counter has wrapped around, reset all fencing values.
:param entry: new entry to be enqueued
:param txn_id: lock owner id, must be unique among concurrent requests
:param overwrite_if_full: flag indicating if the head position should be overwritten if the queue is full
:return: Offset position of the enqueued entry. throws: ASAborted('Queue is full'), ASAborted('Timed out')<|endoftext|>
|
b2429e9c9ba67b0dd5f203f6820876e1eeb2deb4c60b334133e7a2c7632936a9
|
def dequeue(self, txn_id):
'\n Dequee and return the entry at the head of the queue. If the queue is empty, returns None.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :return: dict containing entry and offset for the entry at the head of the queue,\n or None if the queue is empty\n '
q_state = self._lock(txn_id, LargeQueue.Ops.Dequeue)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
if self._queue_is_empty(head_offset, tail_offset):
self._commit_release(txn_id)
return None
(buf_rec_index, entry_index) = self._get_entry_location(head_offset)
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
ops = [list_helpers.list_get('entries', entry_index)]
(_, _, record) = self.client.operate(buf_key, ops)
self._commit_release(txn_id, new_head_offset=(head_offset + 1))
return record['entries']
|
Dequee and return the entry at the head of the queue. If the queue is empty, returns None.
:param txn_id: lock owner id, must be unique among concurrent requests
:return: dict containing entry and offset for the entry at the head of the queue,
or None if the queue is empty
|
large_queue.py
|
dequeue
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def dequeue(self, txn_id):
'\n Dequee and return the entry at the head of the queue. If the queue is empty, returns None.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :return: dict containing entry and offset for the entry at the head of the queue,\n or None if the queue is empty\n '
q_state = self._lock(txn_id, LargeQueue.Ops.Dequeue)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
if self._queue_is_empty(head_offset, tail_offset):
self._commit_release(txn_id)
return None
(buf_rec_index, entry_index) = self._get_entry_location(head_offset)
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
ops = [list_helpers.list_get('entries', entry_index)]
(_, _, record) = self.client.operate(buf_key, ops)
self._commit_release(txn_id, new_head_offset=(head_offset + 1))
return record['entries']
|
def dequeue(self, txn_id):
'\n Dequee and return the entry at the head of the queue. If the queue is empty, returns None.\n :param txn_id: lock owner id, must be unique among concurrent requests\n :return: dict containing entry and offset for the entry at the head of the queue,\n or None if the queue is empty\n '
q_state = self._lock(txn_id, LargeQueue.Ops.Dequeue)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
if self._queue_is_empty(head_offset, tail_offset):
self._commit_release(txn_id)
return None
(buf_rec_index, entry_index) = self._get_entry_location(head_offset)
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
ops = [list_helpers.list_get('entries', entry_index)]
(_, _, record) = self.client.operate(buf_key, ops)
self._commit_release(txn_id, new_head_offset=(head_offset + 1))
return record['entries']<|docstring|>Dequee and return the entry at the head of the queue. If the queue is empty, returns None.
:param txn_id: lock owner id, must be unique among concurrent requests
:return: dict containing entry and offset for the entry at the head of the queue,
or None if the queue is empty<|endoftext|>
|
fd07f2e07f85421a4494e7eafd3e8b83031b6597738ab0ab0bd07efd19bc6bdf
|
def get_entry_at_offset(self, offset):
'\n Get the entry at the given offset if the offset currently exists in the queue. The function\n does not acquire the queue lock and offers no guarantee that an entry exists or has not been removed\n at that offset.\n :param offset: offset of the entry (offset is the monotonically increasing position in the queue)\n :return: dict containing entry and offset if the entry at the offset is present in the queue, otherwise None\n '
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
metadata_bins = ['head-offset', 'tail-offset']
(_, _, q_state) = self.client.select(metadata_key, metadata_bins)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
if ((offset >= tail_offset) or (offset < head_offset)):
return None
(buf_rec_index, entry_index) = self._get_entry_location(offset)
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
entry = self.client.list_get(buf_key, 'entries', entry_index)
if (entry['offset'] != offset):
return None
return entry
|
Get the entry at the given offset if the offset currently exists in the queue. The function
does not acquire the queue lock and offers no guarantee that an entry exists or has not been removed
at that offset.
:param offset: offset of the entry (offset is the monotonically increasing position in the queue)
:return: dict containing entry and offset if the entry at the offset is present in the queue, otherwise None
|
large_queue.py
|
get_entry_at_offset
|
neelp-git/aerospike-large-queue
| 2
|
python
|
def get_entry_at_offset(self, offset):
'\n Get the entry at the given offset if the offset currently exists in the queue. The function\n does not acquire the queue lock and offers no guarantee that an entry exists or has not been removed\n at that offset.\n :param offset: offset of the entry (offset is the monotonically increasing position in the queue)\n :return: dict containing entry and offset if the entry at the offset is present in the queue, otherwise None\n '
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
metadata_bins = ['head-offset', 'tail-offset']
(_, _, q_state) = self.client.select(metadata_key, metadata_bins)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
if ((offset >= tail_offset) or (offset < head_offset)):
return None
(buf_rec_index, entry_index) = self._get_entry_location(offset)
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
entry = self.client.list_get(buf_key, 'entries', entry_index)
if (entry['offset'] != offset):
return None
return entry
|
def get_entry_at_offset(self, offset):
'\n Get the entry at the given offset if the offset currently exists in the queue. The function\n does not acquire the queue lock and offers no guarantee that an entry exists or has not been removed\n at that offset.\n :param offset: offset of the entry (offset is the monotonically increasing position in the queue)\n :return: dict containing entry and offset if the entry at the offset is present in the queue, otherwise None\n '
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
metadata_bins = ['head-offset', 'tail-offset']
(_, _, q_state) = self.client.select(metadata_key, metadata_bins)
head_offset = long(q_state['head-offset'])
tail_offset = long(q_state['tail-offset'])
if ((offset >= tail_offset) or (offset < head_offset)):
return None
(buf_rec_index, entry_index) = self._get_entry_location(offset)
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
entry = self.client.list_get(buf_key, 'entries', entry_index)
if (entry['offset'] != offset):
return None
return entry<|docstring|>Get the entry at the given offset if the offset currently exists in the queue. The function
does not acquire the queue lock and offers no guarantee that an entry exists or has not been removed
at that offset.
:param offset: offset of the entry (offset is the monotonically increasing position in the queue)
:return: dict containing entry and offset if the entry at the offset is present in the queue, otherwise None<|endoftext|>
|
263e69ea858fe83c0f5103d351317ff1acf09ce981c0fd453a08fc0f2860a64b
|
def encode(self):
' Convert the bootloader-message to a can-message '
data = ([self.board_id, ((self.type << 6) | self.subject), self.number, self.data_counter] + self.data)
message = can.Message(self.BOOTLOADER_CAN_IDENTIFIER, data, extended=False, rtr=False)
return message
|
Convert the bootloader-message to a can-message
|
tools/bootloader/can/host/bootloader.py
|
encode
|
roboterclubaachen/xpcc
| 161
|
python
|
def encode(self):
' '
data = ([self.board_id, ((self.type << 6) | self.subject), self.number, self.data_counter] + self.data)
message = can.Message(self.BOOTLOADER_CAN_IDENTIFIER, data, extended=False, rtr=False)
return message
|
def encode(self):
' '
data = ([self.board_id, ((self.type << 6) | self.subject), self.number, self.data_counter] + self.data)
message = can.Message(self.BOOTLOADER_CAN_IDENTIFIER, data, extended=False, rtr=False)
return message<|docstring|>Convert the bootloader-message to a can-message<|endoftext|>
|
b88386d19c0428db1e051dad5a575db7b797a30e09f7344b1b8db9eef11bbfb2
|
def __init__(self, board_id, interface, debug=False):
'Constructor'
self.board = ProgrammeableBoard(board_id)
filter = BootloaderFilter(self._get_message)
self.interface = interface
self.interface.addFilter(filter)
self.debugmode = debug
self.msg_number = 0
self.msg_wait_for = threading.Event()
self.msg_queue = Queue.Queue()
|
Constructor
|
tools/bootloader/can/host/bootloader.py
|
__init__
|
roboterclubaachen/xpcc
| 161
|
python
|
def __init__(self, board_id, interface, debug=False):
self.board = ProgrammeableBoard(board_id)
filter = BootloaderFilter(self._get_message)
self.interface = interface
self.interface.addFilter(filter)
self.debugmode = debug
self.msg_number = 0
self.msg_wait_for = threading.Event()
self.msg_queue = Queue.Queue()
|
def __init__(self, board_id, interface, debug=False):
self.board = ProgrammeableBoard(board_id)
filter = BootloaderFilter(self._get_message)
self.interface = interface
self.interface.addFilter(filter)
self.debugmode = debug
self.msg_number = 0
self.msg_wait_for = threading.Event()
self.msg_queue = Queue.Queue()<|docstring|>Constructor<|endoftext|>
|
ebccabb366368f6ae57c414789a7e2e067b7fa2a85c0ba3a6eb2516c1b419c32
|
def identify(self):
'Send the "Identify" command until it gets a response from the \n\t\tbootloader and decode the returned information\n\t\t'
while True:
try:
self._start_bootloader_command()
response = self._send(subject=MessageSubject.IDENTIFY, timeout=0.1, attempts=10)
except BootloaderException:
pass
else:
break
self.board.bootloader_type = (response.data[0] >> 4)
self.board.version = (response.data[0] & 15)
self.board.pagesize = {0: 32, 1: 64, 2: 128, 3: 256}[response.data[1]]
self.board.pages = ((response.data[2] << 8) + response.data[3])
self.board.connected = True
|
Send the "Identify" command until it gets a response from the
bootloader and decode the returned information
|
tools/bootloader/can/host/bootloader.py
|
identify
|
roboterclubaachen/xpcc
| 161
|
python
|
def identify(self):
'Send the "Identify" command until it gets a response from the \n\t\tbootloader and decode the returned information\n\t\t'
while True:
try:
self._start_bootloader_command()
response = self._send(subject=MessageSubject.IDENTIFY, timeout=0.1, attempts=10)
except BootloaderException:
pass
else:
break
self.board.bootloader_type = (response.data[0] >> 4)
self.board.version = (response.data[0] & 15)
self.board.pagesize = {0: 32, 1: 64, 2: 128, 3: 256}[response.data[1]]
self.board.pages = ((response.data[2] << 8) + response.data[3])
self.board.connected = True
|
def identify(self):
'Send the "Identify" command until it gets a response from the \n\t\tbootloader and decode the returned information\n\t\t'
while True:
try:
self._start_bootloader_command()
response = self._send(subject=MessageSubject.IDENTIFY, timeout=0.1, attempts=10)
except BootloaderException:
pass
else:
break
self.board.bootloader_type = (response.data[0] >> 4)
self.board.version = (response.data[0] & 15)
self.board.pagesize = {0: 32, 1: 64, 2: 128, 3: 256}[response.data[1]]
self.board.pages = ((response.data[2] << 8) + response.data[3])
self.board.connected = True<|docstring|>Send the "Identify" command until it gets a response from the
bootloader and decode the returned information<|endoftext|>
|
78ad1b867c11d5911b478beb84022f5b9785df8bf02f92c66ffd0b8dc6a33ac8
|
def program_page(self, page, data, addressAlreadySet=False):
'Program a page of the flash memory\n\t\t\n\t\tTries the send the data in a blocks of 32 messages befor an\n\t\tacknowledge. The blocksize is stepwise reduced to one when there\n\t\tare any errors during the transmission.\n\t\tRaises BootloaderException if the error stil appears then.\n\t\t'
data = [ord(x) for x in data]
size = len(data)
if (size < self.board.pagesize):
data += ([255] * (self.board.pagesize - size))
remaining = (self.board.pagesize / 4)
blocksize = 64
offset = 0
while (remaining > 0):
try:
if (not addressAlreadySet):
self._send(MessageSubject.SET_ADDRESS, [(page >> 8), (page & 255), 0, offset])
if (remaining < blocksize):
blocksize = remaining
if (blocksize == 1):
answer = self._send(MessageSubject.DATA, data[(offset * 4):((offset * 4) + 4)])
else:
i = offset
self._send(MessageSubject.DATA, response=False, counter=(Message.START_OF_MESSAGE_MASK | (blocksize - 1)), data=data[(i * 4):((i * 4) + 4)])
for k in range((blocksize - 2), 0, (- 1)):
i += 1
self._send(MessageSubject.DATA, response=False, counter=k, data=data[(i * 4):((i * 4) + 4)])
i += 1
answer = self._send(MessageSubject.DATA, response=True, counter=0, data=data[(i * 4):((i * 4) + 4)])
remaining -= blocksize
offset += blocksize
addressAlreadySet = True
except BootloaderException as msg:
(print('Exception: %s') % msg)
if (blocksize > 1):
blocksize /= 2
print(blocksize)
addressAlreadySet = False
time.sleep(0.3)
else:
raise
returned_page = ((answer.data[0] << 8) | answer.data[1])
if (returned_page != page):
raise BootloaderException(('Could not write page %i!' % page))
|
Program a page of the flash memory
Tries the send the data in a blocks of 32 messages befor an
acknowledge. The blocksize is stepwise reduced to one when there
are any errors during the transmission.
Raises BootloaderException if the error stil appears then.
|
tools/bootloader/can/host/bootloader.py
|
program_page
|
roboterclubaachen/xpcc
| 161
|
python
|
def program_page(self, page, data, addressAlreadySet=False):
'Program a page of the flash memory\n\t\t\n\t\tTries the send the data in a blocks of 32 messages befor an\n\t\tacknowledge. The blocksize is stepwise reduced to one when there\n\t\tare any errors during the transmission.\n\t\tRaises BootloaderException if the error stil appears then.\n\t\t'
data = [ord(x) for x in data]
size = len(data)
if (size < self.board.pagesize):
data += ([255] * (self.board.pagesize - size))
remaining = (self.board.pagesize / 4)
blocksize = 64
offset = 0
while (remaining > 0):
try:
if (not addressAlreadySet):
self._send(MessageSubject.SET_ADDRESS, [(page >> 8), (page & 255), 0, offset])
if (remaining < blocksize):
blocksize = remaining
if (blocksize == 1):
answer = self._send(MessageSubject.DATA, data[(offset * 4):((offset * 4) + 4)])
else:
i = offset
self._send(MessageSubject.DATA, response=False, counter=(Message.START_OF_MESSAGE_MASK | (blocksize - 1)), data=data[(i * 4):((i * 4) + 4)])
for k in range((blocksize - 2), 0, (- 1)):
i += 1
self._send(MessageSubject.DATA, response=False, counter=k, data=data[(i * 4):((i * 4) + 4)])
i += 1
answer = self._send(MessageSubject.DATA, response=True, counter=0, data=data[(i * 4):((i * 4) + 4)])
remaining -= blocksize
offset += blocksize
addressAlreadySet = True
except BootloaderException as msg:
(print('Exception: %s') % msg)
if (blocksize > 1):
blocksize /= 2
print(blocksize)
addressAlreadySet = False
time.sleep(0.3)
else:
raise
returned_page = ((answer.data[0] << 8) | answer.data[1])
if (returned_page != page):
raise BootloaderException(('Could not write page %i!' % page))
|
def program_page(self, page, data, addressAlreadySet=False):
'Program a page of the flash memory\n\t\t\n\t\tTries the send the data in a blocks of 32 messages befor an\n\t\tacknowledge. The blocksize is stepwise reduced to one when there\n\t\tare any errors during the transmission.\n\t\tRaises BootloaderException if the error stil appears then.\n\t\t'
data = [ord(x) for x in data]
size = len(data)
if (size < self.board.pagesize):
data += ([255] * (self.board.pagesize - size))
remaining = (self.board.pagesize / 4)
blocksize = 64
offset = 0
while (remaining > 0):
try:
if (not addressAlreadySet):
self._send(MessageSubject.SET_ADDRESS, [(page >> 8), (page & 255), 0, offset])
if (remaining < blocksize):
blocksize = remaining
if (blocksize == 1):
answer = self._send(MessageSubject.DATA, data[(offset * 4):((offset * 4) + 4)])
else:
i = offset
self._send(MessageSubject.DATA, response=False, counter=(Message.START_OF_MESSAGE_MASK | (blocksize - 1)), data=data[(i * 4):((i * 4) + 4)])
for k in range((blocksize - 2), 0, (- 1)):
i += 1
self._send(MessageSubject.DATA, response=False, counter=k, data=data[(i * 4):((i * 4) + 4)])
i += 1
answer = self._send(MessageSubject.DATA, response=True, counter=0, data=data[(i * 4):((i * 4) + 4)])
remaining -= blocksize
offset += blocksize
addressAlreadySet = True
except BootloaderException as msg:
(print('Exception: %s') % msg)
if (blocksize > 1):
blocksize /= 2
print(blocksize)
addressAlreadySet = False
time.sleep(0.3)
else:
raise
returned_page = ((answer.data[0] << 8) | answer.data[1])
if (returned_page != page):
raise BootloaderException(('Could not write page %i!' % page))<|docstring|>Program a page of the flash memory
Tries the send the data in a blocks of 32 messages befor an
acknowledge. The blocksize is stepwise reduced to one when there
are any errors during the transmission.
Raises BootloaderException if the error stil appears then.<|endoftext|>
|
88899d1e8b2d44e69c9a9381ce3782a567c5de2520e7abd7c7e19623ebacffa3
|
def start_app(self):
    """Tell the bootloader to start the freshly written application."""
    self._send(MessageSubject.START_APPLICATION)
|
Start the written application
|
tools/bootloader/can/host/bootloader.py
|
start_app
|
roboterclubaachen/xpcc
| 161
|
python
|
def start_app(self):
self._send(MessageSubject.START_APPLICATION)
|
def start_app(self):
self._send(MessageSubject.START_APPLICATION)<|docstring|>Start the written application<|endoftext|>
|
21d54426904ceaa8ee1e3594d7d71054ad3f18351ead3ced1030d3f01b800f49
|
def program(self, segments):
    """Program the AVR.

    First waits for a connection, then sends the data page by page.
    Finally the written application is started.

    segments -- list of contiguous data segments (byte strings) to
        write consecutively into flash
    """
    self._report_progress(self.WAITING)
    print('connecting ... ')
    self.identify()
    print('ok')
    print(self.board)
    # bug fix: reduce() is not a builtin in Python 3; sum() over the
    # segment lengths is the idiomatic equivalent
    totalsize = sum(len(segment) for segment in segments)
    segment_number = 0
    pagesize = self.board.pagesize
    pages = int(math.ceil(float(totalsize) / float(pagesize)))
    print('write %i pages\n' % pages)
    if pages > self.board.pages:
        raise BootloaderException('Programsize exceeds available Flash!')
    self._report_progress(self.START)
    starttime = time.time()
    addressSet = False
    offset = 0
    for i in range(pages):
        data = segments[segment_number]
        self.program_page(page=i,
                          data=data[offset:offset + pagesize],
                          addressAlreadySet=addressSet)
        offset += pagesize
        if offset >= len(data):
            # current segment exhausted; continue with the next one
            offset = 0
            segment_number += 1
            self.debug('Now starting segment %i' % segment_number)
        addressSet = True
        self._report_progress(self.IN_PROGRESS, float(i) / float(pages))
    self._report_progress(self.END)
    endtime = time.time()
    print('%.2f seconds\n' % (endtime - starttime))
    self.start_app()
|
Program the AVR
First the function waits for a connection then it will send the
data page by page. Finally the written application will be started.
|
tools/bootloader/can/host/bootloader.py
|
program
|
roboterclubaachen/xpcc
| 161
|
python
|
def program(self, segments):
'Program the AVR\n\t\t\n\t\tFirst the function waits for a connection then it will send the\n\t\tdata page by page. Finally the written application will be started.\n\t\t'
self._report_progress(self.WAITING)
print('connecting ... ')
self.identify()
print('ok')
print(self.board)
totalsize = reduce((lambda x, y: (x + y)), map((lambda x: len(x)), segments))
segment_number = 0
pagesize = self.board.pagesize
pages = int(math.ceil((float(totalsize) / float(pagesize))))
print(('write %i pages\n' % pages))
if (pages > self.board.pages):
raise BootloaderException('Programsize exceeds available Flash!')
self._report_progress(self.START)
starttime = time.time()
addressSet = False
offset = 0
for i in range(pages):
data = segments[segment_number]
self.program_page(page=i, data=data[offset:(offset + pagesize)], addressAlreadySet=addressSet)
offset += pagesize
if (offset >= len(data)):
offset = 0
segment_number += 1
self.debug(('Now starting segment %i' % segment_number))
addressSet = True
self._report_progress(self.IN_PROGRESS, (float(i) / float(pages)))
self._report_progress(self.END)
endtime = time.time()
print(('%.2f seconds\n' % (endtime - starttime)))
self.start_app()
|
def program(self, segments):
'Program the AVR\n\t\t\n\t\tFirst the function waits for a connection then it will send the\n\t\tdata page by page. Finally the written application will be started.\n\t\t'
self._report_progress(self.WAITING)
print('connecting ... ')
self.identify()
print('ok')
print(self.board)
totalsize = reduce((lambda x, y: (x + y)), map((lambda x: len(x)), segments))
segment_number = 0
pagesize = self.board.pagesize
pages = int(math.ceil((float(totalsize) / float(pagesize))))
print(('write %i pages\n' % pages))
if (pages > self.board.pages):
raise BootloaderException('Programsize exceeds available Flash!')
self._report_progress(self.START)
starttime = time.time()
addressSet = False
offset = 0
for i in range(pages):
data = segments[segment_number]
self.program_page(page=i, data=data[offset:(offset + pagesize)], addressAlreadySet=addressSet)
offset += pagesize
if (offset >= len(data)):
offset = 0
segment_number += 1
self.debug(('Now starting segment %i' % segment_number))
addressSet = True
self._report_progress(self.IN_PROGRESS, (float(i) / float(pages)))
self._report_progress(self.END)
endtime = time.time()
print(('%.2f seconds\n' % (endtime - starttime)))
self.start_app()<|docstring|>Program the AVR
First the function waits for a connection then it will send the
data page by page. Finally the written application will be started.<|endoftext|>
|
e0bbe944ee6ec21baae50194d6021bad957c14206f2b9491d46da41a1c09687d
|
def _send(self, subject, data=[], counter=(Message.START_OF_MESSAGE_MASK | 0), response=True, timeout=0.5, attempts=2):
    """Send a message via the CAN bus.

    With the default settings the function waits for the response to
    the message and retries the transmission after a timeout. After
    the specified number of attempts a BootloaderException is raised.

    Keeps track of the message numbering and restores the correct
    number in case of a reported error.

    NOTE(review): the mutable default ``data=[]`` is shared between
    calls; it is never mutated here, so it is harmless, but a tuple
    default would be safer.
    """
    message = Message(board_id=self.board.id, type=MessageType.REQUEST, subject=subject, number=self.msg_number, data_counter=counter, data=data)
    if (not response):
        # fire-and-forget: no acknowledge expected for this message
        self.interface.send(message.encode())
        self.msg_number = ((self.msg_number + 1) & 255)
        return None
    repeats = 0
    finished = False
    # drain any stale messages left in the queue from earlier traffic
    while True:
        try:
            self.msg_queue.get(False, 0)
        except Queue.Empty:
            break
    while (not finished):
        self.interface.send(message.encode())
        # wait (up to `timeout`) for a response matching our subject
        while True:
            try:
                response_msg = self.msg_queue.get(True, timeout)
            except Queue.Empty:
                # timeout: leave the inner loop and retransmit
                break
            else:
                if (response_msg.subject == message.subject):
                    if (response_msg.type == MessageType.SUCCESS):
                        finished = True
                        # flush any remaining queued messages before returning
                        while True:
                            try:
                                self.msg_queue.get(False, 0)
                            except Queue.Empty:
                                break
                        break
                    elif (response_msg.type == MessageType.WRONG_NUMBER):
                        print(('Warning: Wrong message number detected (board says %x, I have %x)' % (response_msg.number, message.number)))
                        # resynchronize our numbering with the board's
                        if (message.number == 0):
                            self.msg_number = response_msg.number
                            message.number = self.msg_number
                        # give the board time to settle, then discard backlog
                        time.sleep(0.1)
                        while True:
                            try:
                                self.msg_queue.get(False, 0.1)
                            except Queue.Empty:
                                break
                        # NOTE(review): this local is never read in this
                        # method; it looks like a leftover copied from
                        # program_page() — confirm before removing
                        addressAlreadySet = False
                        break
                    else:
                        raise BootloaderException(("Failure %i while sending '%s'" % (response_msg.type, message)))
                else:
                    # response belongs to an older request: ignore it
                    self.debug(('Warning: Discarding obviously old message (received %i/%x, I have %i/%x)' % (response_msg.subject, response_msg.number, message.subject, message.number)))
        repeats += 1
        if ((attempts > 0) and (repeats >= attempts)):
            raise BootloaderException(("No response after %i attempts and timeout %i while sending '%s'" % (repeats, timeout, message)))
    self.msg_number = ((self.msg_number + 1) & 255)
    return response_msg
|
Send a message via CAN Bus
With default settings the function waits for the response to the
message and retries the transmission after a timeout. After the
specified number of retries it will raise a BootloaderException.
Keeps track of the message numbering and restores the correct number
in case of a reported error.
|
tools/bootloader/can/host/bootloader.py
|
_send
|
roboterclubaachen/xpcc
| 161
|
python
|
def _send(self, subject, data=[], counter=(Message.START_OF_MESSAGE_MASK | 0), response=True, timeout=0.5, attempts=2):
'Send a message via CAN Bus\n\t\t\n\t\tWith default settings the functions waits for the response to the\n\t\tmessage and retry the transmission after a timeout. After the\n\t\tspecifed number of retries it will raise a BootloaderException.\n\t\t\n\t\tKeeps track of the message numbering and restores the correct number\n\t\tin case of a reported error.'
message = Message(board_id=self.board.id, type=MessageType.REQUEST, subject=subject, number=self.msg_number, data_counter=counter, data=data)
if (not response):
self.interface.send(message.encode())
self.msg_number = ((self.msg_number + 1) & 255)
return None
repeats = 0
finished = False
while True:
try:
self.msg_queue.get(False, 0)
except Queue.Empty:
break
while (not finished):
self.interface.send(message.encode())
while True:
try:
response_msg = self.msg_queue.get(True, timeout)
except Queue.Empty:
break
else:
if (response_msg.subject == message.subject):
if (response_msg.type == MessageType.SUCCESS):
finished = True
while True:
try:
self.msg_queue.get(False, 0)
except Queue.Empty:
break
break
elif (response_msg.type == MessageType.WRONG_NUMBER):
print(('Warning: Wrong message number detected (board says %x, I have %x)' % (response_msg.number, message.number)))
if (message.number == 0):
self.msg_number = response_msg.number
message.number = self.msg_number
time.sleep(0.1)
while True:
try:
self.msg_queue.get(False, 0.1)
except Queue.Empty:
break
addressAlreadySet = False
break
else:
raise BootloaderException(("Failure %i while sending '%s'" % (response_msg.type, message)))
else:
self.debug(('Warning: Discarding obviously old message (received %i/%x, I have %i/%x)' % (response_msg.subject, response_msg.number, message.subject, message.number)))
repeats += 1
if ((attempts > 0) and (repeats >= attempts)):
raise BootloaderException(("No response after %i attempts and timeout %i while sending '%s'" % (repeats, timeout, message)))
self.msg_number = ((self.msg_number + 1) & 255)
return response_msg
|
def _send(self, subject, data=[], counter=(Message.START_OF_MESSAGE_MASK | 0), response=True, timeout=0.5, attempts=2):
'Send a message via CAN Bus\n\t\t\n\t\tWith default settings the functions waits for the response to the\n\t\tmessage and retry the transmission after a timeout. After the\n\t\tspecifed number of retries it will raise a BootloaderException.\n\t\t\n\t\tKeeps track of the message numbering and restores the correct number\n\t\tin case of a reported error.'
message = Message(board_id=self.board.id, type=MessageType.REQUEST, subject=subject, number=self.msg_number, data_counter=counter, data=data)
if (not response):
self.interface.send(message.encode())
self.msg_number = ((self.msg_number + 1) & 255)
return None
repeats = 0
finished = False
while True:
try:
self.msg_queue.get(False, 0)
except Queue.Empty:
break
while (not finished):
self.interface.send(message.encode())
while True:
try:
response_msg = self.msg_queue.get(True, timeout)
except Queue.Empty:
break
else:
if (response_msg.subject == message.subject):
if (response_msg.type == MessageType.SUCCESS):
finished = True
while True:
try:
self.msg_queue.get(False, 0)
except Queue.Empty:
break
break
elif (response_msg.type == MessageType.WRONG_NUMBER):
print(('Warning: Wrong message number detected (board says %x, I have %x)' % (response_msg.number, message.number)))
if (message.number == 0):
self.msg_number = response_msg.number
message.number = self.msg_number
time.sleep(0.1)
while True:
try:
self.msg_queue.get(False, 0.1)
except Queue.Empty:
break
addressAlreadySet = False
break
else:
raise BootloaderException(("Failure %i while sending '%s'" % (response_msg.type, message)))
else:
self.debug(('Warning: Discarding obviously old message (received %i/%x, I have %i/%x)' % (response_msg.subject, response_msg.number, message.subject, message.number)))
repeats += 1
if ((attempts > 0) and (repeats >= attempts)):
raise BootloaderException(("No response after %i attempts and timeout %i while sending '%s'" % (repeats, timeout, message)))
self.msg_number = ((self.msg_number + 1) & 255)
return response_msg<|docstring|>Send a message via CAN Bus
With default settings the functions waits for the response to the
message and retry the transmission after a timeout. After the
specifed number of retries it will raise a BootloaderException.
Keeps track of the message numbering and restores the correct number
in case of a reported error.<|endoftext|>
|
024c54b4e1bbb74b0ce1a94d0bdd7f925295493cd95e29d1a83561c933fcf8c1
|
def _get_message(self, can_message):
    """Decode an incoming CAN frame and queue it if it is for our board.

    Frames that fail to decode or that carry another board's id are
    silently dropped.
    """
    self.debug('> ' + str(can_message))
    try:
        frame = Message().decode(can_message)
        if frame.board_id != self.board.id:
            return
    except BootloaderException:
        return
    self.msg_queue.put(frame)
|
Receives and checks all messages from the CAN bus
|
tools/bootloader/can/host/bootloader.py
|
_get_message
|
roboterclubaachen/xpcc
| 161
|
python
|
def _get_message(self, can_message):
self.debug(('> ' + str(can_message)))
try:
message = Message().decode(can_message)
if (message.board_id != self.board.id):
return
except BootloaderException:
return
self.msg_queue.put(message)
|
def _get_message(self, can_message):
self.debug(('> ' + str(can_message)))
try:
message = Message().decode(can_message)
if (message.board_id != self.board.id):
return
except BootloaderException:
return
self.msg_queue.put(message)<|docstring|>Receives and checks all messages from the CAN bus<|endoftext|>
|
52ff819e9c6cd7c84864a3393aa90222f9c75114c0e03afd16adaf74ab19d2ed
|
def _report_progress(self, state, progress=0.0):
    """Progress hook called during programming.

    state -- one of the phase constants used by program():
        WAITING, START, IN_PROGRESS, END
    progress -- completed fraction in [0.0, 1.0]; only supplied for
        IN_PROGRESS

    The default implementation does nothing. Can be overwritten to
    implement a progress bar, for example.
    """
    pass
|
Called to report the current status
Can be overwritten to implement a progressbar for example.
|
tools/bootloader/can/host/bootloader.py
|
_report_progress
|
roboterclubaachen/xpcc
| 161
|
python
|
def _report_progress(self, state, progress=0.0):
'Called to report the current status\n\t\t\n\t\tCan be overwritten to implement a progressbar for example.\n\t\t'
pass
|
def _report_progress(self, state, progress=0.0):
'Called to report the current status\n\t\t\n\t\tCan be overwritten to implement a progressbar for example.\n\t\t'
pass<|docstring|>Called to report the current status
Can be overwritten to implement a progressbar for example.<|endoftext|>
|
0f0b40d7dd19a4c9f67dfefa593e4d4c2e4190cb2332829cb4a76ad9bedf45a0
|
def applyVisualizationCPUUsage(self, Event):
    """Set the Powermates' LEDs to the current system CPU usage.

    Cycles an index through the CPU averages so that successive
    Powermates show successive CPUs, wrapping at Event.NumCPUs.
    """
    cpu_index = 0
    for powermate in Gizmod.Powermates:
        cpu_index = 0 if cpu_index >= Event.NumCPUs else cpu_index + 1
        powermate.LEDPercent = Event.getCPUUsageAvg(cpu_index)
|
Set the Powermates' LEDs to the current system CPU Usage
|
scripts/modules.d/001-Powermate-Visualizer.py
|
applyVisualizationCPUUsage
|
pauljeremyturner/gizmod
| 0
|
python
|
def applyVisualizationCPUUsage(self, Event):
"\n\t\t\n\t\t"
Count = 0
for Powermate in Gizmod.Powermates:
if (Count >= Event.NumCPUs):
Count = 0
else:
Count += 1
Powermate.LEDPercent = Event.getCPUUsageAvg(Count)
|
def applyVisualizationCPUUsage(self, Event):
"\n\t\t\n\t\t"
Count = 0
for Powermate in Gizmod.Powermates:
if (Count >= Event.NumCPUs):
Count = 0
else:
Count += 1
Powermate.LEDPercent = Event.getCPUUsageAvg(Count)<|docstring|>Set the Powermates' LEDs to the current system CPU Usage<|endoftext|>
|
db7a5e60f69ea162f74f56db49cd9d734ca216d96b31a8d86ebc12fb1d3e0a5d
|
def applyVisualizationVolume(self):
    """Mirror the default playback volume on every Powermate's LED.

    When playback is switched off, the LEDs pulse instead of showing
    a level. Does nothing until both default mixer elements exist.
    """
    mixer_volume = Gizmod.DefaultMixerVolume
    mixer_switch = Gizmod.DefaultMixerSwitch
    if not (mixer_volume and mixer_switch):
        return
    if mixer_switch.SwitchPlayback:
        for powermate in Gizmod.Powermates:
            powermate.LEDPercent = mixer_volume.VolumePlaybackPercent
    else:
        for powermate in Gizmod.Powermates:
            powermate.pulseLED(255, 257, 2)
|
Set the Powermates' LEDs to the Default playback volume mixer's level
|
scripts/modules.d/001-Powermate-Visualizer.py
|
applyVisualizationVolume
|
pauljeremyturner/gizmod
| 0
|
python
|
def applyVisualizationVolume(self):
"\n\t\t\n\t\t"
if ((not Gizmod.DefaultMixerVolume) or (not Gizmod.DefaultMixerSwitch)):
return
if Gizmod.DefaultMixerSwitch.SwitchPlayback:
for Powermate in Gizmod.Powermates:
Powermate.LEDPercent = Gizmod.DefaultMixerVolume.VolumePlaybackPercent
else:
for Powermate in Gizmod.Powermates:
Powermate.pulseLED(255, 257, 2)
|
def applyVisualizationVolume(self):
"\n\t\t\n\t\t"
if ((not Gizmod.DefaultMixerVolume) or (not Gizmod.DefaultMixerSwitch)):
return
if Gizmod.DefaultMixerSwitch.SwitchPlayback:
for Powermate in Gizmod.Powermates:
Powermate.LEDPercent = Gizmod.DefaultMixerVolume.VolumePlaybackPercent
else:
for Powermate in Gizmod.Powermates:
Powermate.pulseLED(255, 257, 2)<|docstring|>Set the Powermates' LEDs to the Default playback volume mixer's level<|endoftext|>
|
f286464c80b6c5fc584937b16ca145beb6f9add48fb05274556eb10c43cc4936
|
def applyVisualizationSound(self, Event):
    """Set the Powermates' LEDs to the current VU sound level.

    A single Powermate shows the combined level; with two or more,
    the first two show the left and right channels separately.
    """
    powermates = Gizmod.Powermates
    if len(powermates) == 1:
        powermates[0].LEDPercent = Event.VUCombined * 100.0
    else:
        powermates[0].LEDPercent = Event.VULeft * 100.0
        powermates[1].LEDPercent = Event.VURight * 100.0
|
Set the Powermates' LEDs to the sound level
|
scripts/modules.d/001-Powermate-Visualizer.py
|
applyVisualizationSound
|
pauljeremyturner/gizmod
| 0
|
python
|
def applyVisualizationSound(self, Event):
"\n\t\t\n\t\t"
if (len(Gizmod.Powermates) == 1):
Gizmod.Powermates[0].LEDPercent = (Event.VUCombined * 100.0)
else:
Gizmod.Powermates[0].LEDPercent = (Event.VULeft * 100.0)
Gizmod.Powermates[1].LEDPercent = (Event.VURight * 100.0)
|
def applyVisualizationSound(self, Event):
"\n\t\t\n\t\t"
if (len(Gizmod.Powermates) == 1):
Gizmod.Powermates[0].LEDPercent = (Event.VUCombined * 100.0)
else:
Gizmod.Powermates[0].LEDPercent = (Event.VULeft * 100.0)
Gizmod.Powermates[1].LEDPercent = (Event.VURight * 100.0)<|docstring|>Set the Powermates' LEDs to the sound level<|endoftext|>
|
1a965028e40c14e86b25ab5cce15c801a474a6654b5de9e60075dda6bba98e7a
|
def __init__(self):
    """Default constructor.

    Initializes the base visualizer and announces this script in
    Gizmod's startup output together with the number of attached
    Powermates.
    """
    VisualizerDefault.__init__(self)
    # verbosity level 1; uses the class docstring as the description
    Gizmod.printNiceScriptInit(1, self.__class__.__name__, self.__class__.__doc__, (str(len(Gizmod.Powermates)) + ' Powermates'))
|
Default Constructor
|
scripts/modules.d/001-Powermate-Visualizer.py
|
__init__
|
pauljeremyturner/gizmod
| 0
|
python
|
def __init__(self):
' \n\t\t\n\t\t'
VisualizerDefault.__init__(self)
Gizmod.printNiceScriptInit(1, self.__class__.__name__, self.__class__.__doc__, (str(len(Gizmod.Powermates)) + ' Powermates'))
|
def __init__(self):
' \n\t\t\n\t\t'
VisualizerDefault.__init__(self)
Gizmod.printNiceScriptInit(1, self.__class__.__name__, self.__class__.__doc__, (str(len(Gizmod.Powermates)) + ' Powermates'))<|docstring|>Default Constructor<|endoftext|>
|
fb03c0319ebfd6eaa51830e3889be3673da105722f6a02d14082e39c2c8338cb
|
def read(data):
    """Return the graph of caves as an adjacency-list mapping.

    Each line of *data* is an undirected edge "a-b", so both
    endpoints record each other as neighbours. Unknown caves map to
    an empty list (defaultdict behaviour).
    """
    graph = defaultdict(list)
    for line in data.splitlines():
        match = re.match('(\\w+)-(\\w+)', line)
        left, right = match.group(1), match.group(2)
        graph[left].append(right)
        graph[right].append(left)
    return graph
|
Return the graph of caves.
|
advent of code/2021/day12.py
|
read
|
mwermelinger/algorithmic-problems
| 0
|
python
|
def read(data):
graph = defaultdict(list)
for line in data.splitlines():
(node1, node2) = re.match('(\\w+)-(\\w+)', line).group(1, 2)
graph[node1].append(node2)
graph[node2].append(node1)
return graph
|
def read(data):
graph = defaultdict(list)
for line in data.splitlines():
(node1, node2) = re.match('(\\w+)-(\\w+)', line).group(1, 2)
graph[node1].append(node2)
graph[node2].append(node1)
return graph<|docstring|>Return the graph of caves.<|endoftext|>
|
69b6413f5c541e48ee78b7a76b6dd8d7402a0c6e8e2f0c4021c1c53cea79f653
|
def part1(data):
    """Return how many paths from start to end visit small caves once."""
    graph = read(data)

    def count_paths(node, visited, route):
        """Paths from node to 'end' avoiding the visited small caves.

        route ends in node and exists only for debugging purposes.
        """
        if node == 'end':
            return 1
        total = 0
        for neighbour in graph[node]:
            if neighbour in visited:
                continue
            extended = route + [neighbour]
            if neighbour.islower():
                total += count_paths(neighbour, visited | {neighbour}, extended)
            else:
                total += count_paths(neighbour, visited, extended)
        return total

    return count_paths('start', {'start'}, ['start'])
|
Return how many paths from start to end visit small caves once.
|
advent of code/2021/day12.py
|
part1
|
mwermelinger/algorithmic-problems
| 0
|
python
|
def part1(data):
def paths(node: str, visited: set, path: list) -> int:
"Return how many paths from node to end don't revisit visited.\n\n Path ends in node and is for debugging purposes only.\n "
if (node == 'end'):
return 1
total = 0
for neighbour in graph[node]:
if (neighbour not in visited):
new_path = (path + [neighbour])
if neighbour.islower():
total += paths(neighbour, (visited | {neighbour}), new_path)
else:
total += paths(neighbour, visited, new_path)
return total
graph = read(data)
return paths('start', {'start'}, ['start'])
|
def part1(data):
def paths(node: str, visited: set, path: list) -> int:
"Return how many paths from node to end don't revisit visited.\n\n Path ends in node and is for debugging purposes only.\n "
if (node == 'end'):
return 1
total = 0
for neighbour in graph[node]:
if (neighbour not in visited):
new_path = (path + [neighbour])
if neighbour.islower():
total += paths(neighbour, (visited | {neighbour}), new_path)
else:
total += paths(neighbour, visited, new_path)
return total
graph = read(data)
return paths('start', {'start'}, ['start'])<|docstring|>Return how many paths from start to end visit small caves once.<|endoftext|>
|
f7cb6b8a4b5e0b0629b2e62974590c8ec8bbb1e6ec5a8b1a7eb59b1226318bf2
|
def part2(data):
    """Return how many paths from start to end visit 0 or 1 small cave twice.

    The start and end caves are visited once only.
    """
    graph = read(data)

    def count_paths(node, visited, route, doubled):
        """Paths from node to 'end' obeying the part-2 rules.

        doubled is '' until some small cave has been entered a second
        time; afterwards it holds that cave's name.
        """
        if node == 'end':
            return 1
        total = 0
        for neighbour in graph[node]:
            extended = route + [neighbour]
            if neighbour.isupper():
                total += count_paths(neighbour, visited, extended, doubled)
            elif neighbour not in visited:
                total += count_paths(neighbour, visited | {neighbour}, extended, doubled)
            elif neighbour != 'start' and not doubled:
                total += count_paths(neighbour, visited, extended, neighbour)
        return total

    return count_paths('start', {'start'}, ['start'], '')
|
Return how many paths from start to end visit 0 or 1 small cave twice.
The start and end caves are visited once only.
|
advent of code/2021/day12.py
|
part2
|
mwermelinger/algorithmic-problems
| 0
|
python
|
def part2(data):
'Return how many paths from start to end visit 0 or 1 small cave twice.\n\n The start and end caves are visited once only.\n '
def paths(node: str, visited: set, path: list, twice: str) -> int:
"Return how many paths from node to end obey the conditions.\n\n Twice is either or the small cave that was visited two times.\n "
if (node == 'end'):
return 1
total = 0
for neighbour in graph[node]:
new_path = (path + [neighbour])
if neighbour.isupper():
total += paths(neighbour, visited, new_path, twice)
elif (neighbour not in visited):
total += paths(neighbour, (visited | {neighbour}), new_path, twice)
elif ((neighbour != 'start') and (not twice)):
total += paths(neighbour, visited, new_path, neighbour)
return total
graph = read(data)
return paths('start', {'start'}, ['start'], )
|
def part2(data):
'Return how many paths from start to end visit 0 or 1 small cave twice.\n\n The start and end caves are visited once only.\n '
def paths(node: str, visited: set, path: list, twice: str) -> int:
"Return how many paths from node to end obey the conditions.\n\n Twice is either or the small cave that was visited two times.\n "
if (node == 'end'):
return 1
total = 0
for neighbour in graph[node]:
new_path = (path + [neighbour])
if neighbour.isupper():
total += paths(neighbour, visited, new_path, twice)
elif (neighbour not in visited):
total += paths(neighbour, (visited | {neighbour}), new_path, twice)
elif ((neighbour != 'start') and (not twice)):
total += paths(neighbour, visited, new_path, neighbour)
return total
graph = read(data)
return paths('start', {'start'}, ['start'], )<|docstring|>Return how many paths from start to end visit 0 or 1 small cave twice.
The start and end caves are visited once only.<|endoftext|>
|
cad7836055229d61a4ca8f22262b685bf16d272b66d01fc79eb8f55bc7f2011c
|
def paths(node: str, visited: set, path: list) -> int:
    """Count paths from *node* to 'end' that avoid caves in *visited*.

    *path* records the route so far and exists only for debugging.
    Reads the enclosing-scope variable ``graph``.
    """
    if node == 'end':
        return 1
    count = 0
    for neighbour in graph[node]:
        if neighbour in visited:
            continue
        extended = path + [neighbour]
        # small caves (lowercase) may be entered only once
        blocked = visited | {neighbour} if neighbour.islower() else visited
        count += paths(neighbour, blocked, extended)
    return count
|
Return how many paths from node to end don't revisit visited.
Path ends in node and is for debugging purposes only.
|
advent of code/2021/day12.py
|
paths
|
mwermelinger/algorithmic-problems
| 0
|
python
|
def paths(node: str, visited: set, path: list) -> int:
"Return how many paths from node to end don't revisit visited.\n\n Path ends in node and is for debugging purposes only.\n "
if (node == 'end'):
return 1
total = 0
for neighbour in graph[node]:
if (neighbour not in visited):
new_path = (path + [neighbour])
if neighbour.islower():
total += paths(neighbour, (visited | {neighbour}), new_path)
else:
total += paths(neighbour, visited, new_path)
return total
|
def paths(node: str, visited: set, path: list) -> int:
"Return how many paths from node to end don't revisit visited.\n\n Path ends in node and is for debugging purposes only.\n "
if (node == 'end'):
return 1
total = 0
for neighbour in graph[node]:
if (neighbour not in visited):
new_path = (path + [neighbour])
if neighbour.islower():
total += paths(neighbour, (visited | {neighbour}), new_path)
else:
total += paths(neighbour, visited, new_path)
return total<|docstring|>Return how many paths from node to end don't revisit visited.
Path ends in node and is for debugging purposes only.<|endoftext|>
|
97d47899ed326897f5160de5f994c4ee182c7c32984170b40f421654313eb267
|
def paths(node: str, visited: set, path: list, twice: str) -> int:
    """Count paths from *node* to 'end' under the part-2 rules.

    *twice* is '' until some small cave has been visited a second
    time; afterwards it holds that cave's name. *path* is the route
    so far (debugging only). Reads the enclosing-scope ``graph``.
    """
    if node == 'end':
        return 1
    count = 0
    for neighbour in graph[node]:
        extended = path + [neighbour]
        if neighbour.isupper():
            count += paths(neighbour, visited, extended, twice)
        elif neighbour not in visited:
            count += paths(neighbour, visited | {neighbour}, extended, twice)
        elif neighbour != 'start' and not twice:
            # allow exactly one small cave to be entered twice
            count += paths(neighbour, visited, extended, neighbour)
    return count
|
Return how many paths from node to end obey the conditions.
Twice is either '' or the small cave that was visited two times.
|
advent of code/2021/day12.py
|
paths
|
mwermelinger/algorithmic-problems
| 0
|
python
|
def paths(node: str, visited: set, path: list, twice: str) -> int:
"Return how many paths from node to end obey the conditions.\n\n Twice is either or the small cave that was visited two times.\n "
if (node == 'end'):
return 1
total = 0
for neighbour in graph[node]:
new_path = (path + [neighbour])
if neighbour.isupper():
total += paths(neighbour, visited, new_path, twice)
elif (neighbour not in visited):
total += paths(neighbour, (visited | {neighbour}), new_path, twice)
elif ((neighbour != 'start') and (not twice)):
total += paths(neighbour, visited, new_path, neighbour)
return total
|
def paths(node: str, visited: set, path: list, twice: str) -> int:
"Return how many paths from node to end obey the conditions.\n\n Twice is either or the small cave that was visited two times.\n "
if (node == 'end'):
return 1
total = 0
for neighbour in graph[node]:
new_path = (path + [neighbour])
if neighbour.isupper():
total += paths(neighbour, visited, new_path, twice)
elif (neighbour not in visited):
total += paths(neighbour, (visited | {neighbour}), new_path, twice)
elif ((neighbour != 'start') and (not twice)):
total += paths(neighbour, visited, new_path, neighbour)
return total<|docstring|>Return how many paths from node to end obey the conditions.
Twice is either '' or the small cave that was visited two times.<|endoftext|>
|
7f60b6e564e083bc3c5c83779ab7d5c33784113274939df25a62b3a4ae84c3a3
|
def aesara_code(expr, cache=None, **kwargs):
"\n Convert a Sympy expression into a Aesara graph variable.\n\n Parameters\n ==========\n\n expr : sympy.core.expr.Expr\n Sympy expression object to convert.\n\n cache : dict\n Cached Aesara variables (see :class:`AesaraPrinter.cache\n <AesaraPrinter>`). Defaults to the module-level global cache.\n\n dtypes : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n broadcastables : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n Returns\n =======\n\n aesara.graph.basic.Variable\n A variable corresponding to the expression's value in a Aesara symbolic\n expression graph.\n\n "
if (not aesara):
raise ImportError('aesara is required for aesara_code')
if (cache is None):
cache = global_cache
return AesaraPrinter(cache=cache, settings={}).doprint(expr, **kwargs)
|
Convert a Sympy expression into a Aesara graph variable.
Parameters
==========
expr : sympy.core.expr.Expr
Sympy expression object to convert.
cache : dict
Cached Aesara variables (see :class:`AesaraPrinter.cache
<AesaraPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.AesaraPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.AesaraPrinter.doprint`.
Returns
=======
aesara.graph.basic.Variable
A variable corresponding to the expression's value in a Aesara symbolic
expression graph.
|
_notes/.obsidian/scripts/sympy-1.9/sympy/printing/aesaracode.py
|
aesara_code
|
seefrye/waaaaagh-list
| 8,323
|
python
|
def aesara_code(expr, cache=None, **kwargs):
"\n Convert a Sympy expression into a Aesara graph variable.\n\n Parameters\n ==========\n\n expr : sympy.core.expr.Expr\n Sympy expression object to convert.\n\n cache : dict\n Cached Aesara variables (see :class:`AesaraPrinter.cache\n <AesaraPrinter>`). Defaults to the module-level global cache.\n\n dtypes : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n broadcastables : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n Returns\n =======\n\n aesara.graph.basic.Variable\n A variable corresponding to the expression's value in a Aesara symbolic\n expression graph.\n\n "
if (not aesara):
raise ImportError('aesara is required for aesara_code')
if (cache is None):
cache = global_cache
return AesaraPrinter(cache=cache, settings={}).doprint(expr, **kwargs)
|
def aesara_code(expr, cache=None, **kwargs):
"\n Convert a Sympy expression into a Aesara graph variable.\n\n Parameters\n ==========\n\n expr : sympy.core.expr.Expr\n Sympy expression object to convert.\n\n cache : dict\n Cached Aesara variables (see :class:`AesaraPrinter.cache\n <AesaraPrinter>`). Defaults to the module-level global cache.\n\n dtypes : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n broadcastables : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n Returns\n =======\n\n aesara.graph.basic.Variable\n A variable corresponding to the expression's value in a Aesara symbolic\n expression graph.\n\n "
if (not aesara):
raise ImportError('aesara is required for aesara_code')
if (cache is None):
cache = global_cache
return AesaraPrinter(cache=cache, settings={}).doprint(expr, **kwargs)<|docstring|>Convert a Sympy expression into a Aesara graph variable.
Parameters
==========
expr : sympy.core.expr.Expr
Sympy expression object to convert.
cache : dict
Cached Aesara variables (see :class:`AesaraPrinter.cache
<AesaraPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.AesaraPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.AesaraPrinter.doprint`.
Returns
=======
aesara.graph.basic.Variable
A variable corresponding to the expression's value in a Aesara symbolic
expression graph.<|endoftext|>
|
97a8170115514ab888172fc243e8db70cfcbd966839bc30d819c9074fc2b2d51
|
def dim_handling(inputs, dim=None, dims=None, broadcastables=None):
'\n Get value of ``broadcastables`` argument to :func:`.aesara_code` from\n keyword arguments to :func:`.aesara_function`.\n\n Included for backwards compatibility.\n\n Parameters\n ==========\n\n inputs\n Sequence of input symbols.\n\n dim : int\n Common number of dimensions for all inputs. Overrides other arguments\n if given.\n\n dims : dict\n Mapping from input symbols to number of dimensions. Overrides\n ``broadcastables`` argument if given.\n\n broadcastables : dict\n Explicit value of ``broadcastables`` argument to\n :meth:`.AesaraPrinter.doprint`. If not None function will return this value unchanged.\n\n Returns\n =======\n dict\n Dictionary mapping elements of ``inputs`` to their "broadcastable"\n values (tuple of ``bool``\\ s).\n '
if (dim is not None):
return {s: ((False,) * dim) for s in inputs}
if (dims is not None):
maxdim = max(dims.values())
return {s: (((False,) * d) + ((True,) * (maxdim - d))) for (s, d) in dims.items()}
if (broadcastables is not None):
return broadcastables
return {}
|
Get value of ``broadcastables`` argument to :func:`.aesara_code` from
keyword arguments to :func:`.aesara_function`.
Included for backwards compatibility.
Parameters
==========
inputs
Sequence of input symbols.
dim : int
Common number of dimensions for all inputs. Overrides other arguments
if given.
dims : dict
Mapping from input symbols to number of dimensions. Overrides
``broadcastables`` argument if given.
broadcastables : dict
Explicit value of ``broadcastables`` argument to
:meth:`.AesaraPrinter.doprint`. If not None function will return this value unchanged.
Returns
=======
dict
Dictionary mapping elements of ``inputs`` to their "broadcastable"
values (tuple of ``bool``\ s).
|
_notes/.obsidian/scripts/sympy-1.9/sympy/printing/aesaracode.py
|
dim_handling
|
seefrye/waaaaagh-list
| 8,323
|
python
|
def dim_handling(inputs, dim=None, dims=None, broadcastables=None):
'\n Get value of ``broadcastables`` argument to :func:`.aesara_code` from\n keyword arguments to :func:`.aesara_function`.\n\n Included for backwards compatibility.\n\n Parameters\n ==========\n\n inputs\n Sequence of input symbols.\n\n dim : int\n Common number of dimensions for all inputs. Overrides other arguments\n if given.\n\n dims : dict\n Mapping from input symbols to number of dimensions. Overrides\n ``broadcastables`` argument if given.\n\n broadcastables : dict\n Explicit value of ``broadcastables`` argument to\n :meth:`.AesaraPrinter.doprint`. If not None function will return this value unchanged.\n\n Returns\n =======\n dict\n Dictionary mapping elements of ``inputs`` to their "broadcastable"\n values (tuple of ``bool``\\ s).\n '
if (dim is not None):
return {s: ((False,) * dim) for s in inputs}
if (dims is not None):
maxdim = max(dims.values())
return {s: (((False,) * d) + ((True,) * (maxdim - d))) for (s, d) in dims.items()}
if (broadcastables is not None):
return broadcastables
return {}
|
def dim_handling(inputs, dim=None, dims=None, broadcastables=None):
'\n Get value of ``broadcastables`` argument to :func:`.aesara_code` from\n keyword arguments to :func:`.aesara_function`.\n\n Included for backwards compatibility.\n\n Parameters\n ==========\n\n inputs\n Sequence of input symbols.\n\n dim : int\n Common number of dimensions for all inputs. Overrides other arguments\n if given.\n\n dims : dict\n Mapping from input symbols to number of dimensions. Overrides\n ``broadcastables`` argument if given.\n\n broadcastables : dict\n Explicit value of ``broadcastables`` argument to\n :meth:`.AesaraPrinter.doprint`. If not None function will return this value unchanged.\n\n Returns\n =======\n dict\n Dictionary mapping elements of ``inputs`` to their "broadcastable"\n values (tuple of ``bool``\\ s).\n '
if (dim is not None):
return {s: ((False,) * dim) for s in inputs}
if (dims is not None):
maxdim = max(dims.values())
return {s: (((False,) * d) + ((True,) * (maxdim - d))) for (s, d) in dims.items()}
if (broadcastables is not None):
return broadcastables
return {}<|docstring|>Get value of ``broadcastables`` argument to :func:`.aesara_code` from
keyword arguments to :func:`.aesara_function`.
Included for backwards compatibility.
Parameters
==========
inputs
Sequence of input symbols.
dim : int
Common number of dimensions for all inputs. Overrides other arguments
if given.
dims : dict
Mapping from input symbols to number of dimensions. Overrides
``broadcastables`` argument if given.
broadcastables : dict
Explicit value of ``broadcastables`` argument to
:meth:`.AesaraPrinter.doprint`. If not None function will return this value unchanged.
Returns
=======
dict
Dictionary mapping elements of ``inputs`` to their "broadcastable"
values (tuple of ``bool``\ s).<|endoftext|>
|
a8fd1a5560b6ad3485b1bb38974e8b1ab72fb8084bc9ee4263de3da9973d824b
|
def aesara_function(inputs, outputs, scalar=False, *, dim=None, dims=None, broadcastables=None, **kwargs):
'\n Create a Aesara function from SymPy expressions.\n\n The inputs and outputs are converted to Aesara variables using\n :func:`.aesara_code` and then passed to ``aesara.function``.\n\n Parameters\n ==========\n\n inputs\n Sequence of symbols which constitute the inputs of the function.\n\n outputs\n Sequence of expressions which constitute the outputs(s) of the\n function. The free symbols of each expression must be a subset of\n ``inputs``.\n\n scalar : bool\n Convert 0-dimensional arrays in output to scalars. This will return a\n Python wrapper function around the Aesara function object.\n\n cache : dict\n Cached Aesara variables (see :class:`AesaraPrinter.cache\n <AesaraPrinter>`). Defaults to the module-level global cache.\n\n dtypes : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n broadcastables : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n dims : dict\n Alternative to ``broadcastables`` argument. Mapping from elements of\n ``inputs`` to integers indicating the dimension of their associated\n arrays/tensors. Overrides ``broadcastables`` argument if given.\n\n dim : int\n Another alternative to the ``broadcastables`` argument. Common number of\n dimensions to use for all arrays/tensors.\n ``aesara_function([x, y], [...], dim=2)`` is equivalent to using\n ``broadcastables={x: (False, False), y: (False, False)}``.\n\n Returns\n =======\n callable\n A callable object which takes values of ``inputs`` as positional\n arguments and returns an output array for each of the expressions\n in ``outputs``. If ``outputs`` is a single expression the function will\n return a Numpy array, if it is a list of multiple expressions the\n function will return a list of arrays. See description of the ``squeeze``\n argument above for the behavior when a single output is passed in a list.\n The returned object will either be an instance of\n ``aesara.compile.function.types.Function`` or a Python wrapper\n function around one. 
In both cases, the returned value will have a\n ``aesara_function`` attribute which points to the return value of\n ``aesara.function``.\n\n Examples\n ========\n\n >>> from sympy.abc import x, y, z\n >>> from sympy.printing.aesaracode import aesara_function\n\n A simple function with one input and one output:\n\n >>> f1 = aesara_function([x], [x**2 - 1], scalar=True)\n >>> f1(3)\n 8.0\n\n A function with multiple inputs and one output:\n\n >>> f2 = aesara_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True)\n >>> f2(3, 4, 2)\n 5.0\n\n A function with multiple inputs and multiple outputs:\n\n >>> f3 = aesara_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True)\n >>> f3(2, 3)\n [13.0, -5.0]\n\n See also\n ========\n\n dim_handling\n\n '
if (not aesara):
raise ImportError('Aesara is required for aesara_function')
cache = kwargs.pop('cache', {})
dtypes = kwargs.pop('dtypes', {})
broadcastables = dim_handling(inputs, dim=dim, dims=dims, broadcastables=broadcastables)
code = partial(aesara_code, cache=cache, dtypes=dtypes, broadcastables=broadcastables)
tinputs = list(map(code, inputs))
toutputs = list(map(code, outputs))
toutputs = [(output if isinstance(output, aesara.graph.basic.Variable) else aet.as_tensor_variable(output)) for output in toutputs]
if (len(toutputs) == 1):
toutputs = toutputs[0]
func = aesara.function(tinputs, toutputs, **kwargs)
is_0d = [(len(o.variable.broadcastable) == 0) for o in func.outputs]
if ((not scalar) or (not any(is_0d))):
func.aesara_function = func
return func
def wrapper(*args):
out = func(*args)
if is_sequence(out):
return [(o[()] if is_0d[i] else o) for (i, o) in enumerate(out)]
else:
return out[()]
wrapper.__wrapped__ = func
wrapper.__doc__ = func.__doc__
wrapper.aesara_function = func
return wrapper
|
Create a Aesara function from SymPy expressions.
The inputs and outputs are converted to Aesara variables using
:func:`.aesara_code` and then passed to ``aesara.function``.
Parameters
==========
inputs
Sequence of symbols which constitute the inputs of the function.
outputs
Sequence of expressions which constitute the outputs(s) of the
function. The free symbols of each expression must be a subset of
``inputs``.
scalar : bool
Convert 0-dimensional arrays in output to scalars. This will return a
Python wrapper function around the Aesara function object.
cache : dict
Cached Aesara variables (see :class:`AesaraPrinter.cache
<AesaraPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.AesaraPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.AesaraPrinter.doprint`.
dims : dict
Alternative to ``broadcastables`` argument. Mapping from elements of
``inputs`` to integers indicating the dimension of their associated
arrays/tensors. Overrides ``broadcastables`` argument if given.
dim : int
Another alternative to the ``broadcastables`` argument. Common number of
dimensions to use for all arrays/tensors.
``aesara_function([x, y], [...], dim=2)`` is equivalent to using
``broadcastables={x: (False, False), y: (False, False)}``.
Returns
=======
callable
A callable object which takes values of ``inputs`` as positional
arguments and returns an output array for each of the expressions
in ``outputs``. If ``outputs`` is a single expression the function will
return a Numpy array, if it is a list of multiple expressions the
function will return a list of arrays. See description of the ``squeeze``
argument above for the behavior when a single output is passed in a list.
The returned object will either be an instance of
``aesara.compile.function.types.Function`` or a Python wrapper
function around one. In both cases, the returned value will have a
``aesara_function`` attribute which points to the return value of
``aesara.function``.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.printing.aesaracode import aesara_function
A simple function with one input and one output:
>>> f1 = aesara_function([x], [x**2 - 1], scalar=True)
>>> f1(3)
8.0
A function with multiple inputs and one output:
>>> f2 = aesara_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True)
>>> f2(3, 4, 2)
5.0
A function with multiple inputs and multiple outputs:
>>> f3 = aesara_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True)
>>> f3(2, 3)
[13.0, -5.0]
See also
========
dim_handling
|
_notes/.obsidian/scripts/sympy-1.9/sympy/printing/aesaracode.py
|
aesara_function
|
seefrye/waaaaagh-list
| 8,323
|
python
|
def aesara_function(inputs, outputs, scalar=False, *, dim=None, dims=None, broadcastables=None, **kwargs):
'\n Create a Aesara function from SymPy expressions.\n\n The inputs and outputs are converted to Aesara variables using\n :func:`.aesara_code` and then passed to ``aesara.function``.\n\n Parameters\n ==========\n\n inputs\n Sequence of symbols which constitute the inputs of the function.\n\n outputs\n Sequence of expressions which constitute the outputs(s) of the\n function. The free symbols of each expression must be a subset of\n ``inputs``.\n\n scalar : bool\n Convert 0-dimensional arrays in output to scalars. This will return a\n Python wrapper function around the Aesara function object.\n\n cache : dict\n Cached Aesara variables (see :class:`AesaraPrinter.cache\n <AesaraPrinter>`). Defaults to the module-level global cache.\n\n dtypes : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n broadcastables : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n dims : dict\n Alternative to ``broadcastables`` argument. Mapping from elements of\n ``inputs`` to integers indicating the dimension of their associated\n arrays/tensors. Overrides ``broadcastables`` argument if given.\n\n dim : int\n Another alternative to the ``broadcastables`` argument. Common number of\n dimensions to use for all arrays/tensors.\n ``aesara_function([x, y], [...], dim=2)`` is equivalent to using\n ``broadcastables={x: (False, False), y: (False, False)}``.\n\n Returns\n =======\n callable\n A callable object which takes values of ``inputs`` as positional\n arguments and returns an output array for each of the expressions\n in ``outputs``. If ``outputs`` is a single expression the function will\n return a Numpy array, if it is a list of multiple expressions the\n function will return a list of arrays. See description of the ``squeeze``\n argument above for the behavior when a single output is passed in a list.\n The returned object will either be an instance of\n ``aesara.compile.function.types.Function`` or a Python wrapper\n function around one. 
In both cases, the returned value will have a\n ``aesara_function`` attribute which points to the return value of\n ``aesara.function``.\n\n Examples\n ========\n\n >>> from sympy.abc import x, y, z\n >>> from sympy.printing.aesaracode import aesara_function\n\n A simple function with one input and one output:\n\n >>> f1 = aesara_function([x], [x**2 - 1], scalar=True)\n >>> f1(3)\n 8.0\n\n A function with multiple inputs and one output:\n\n >>> f2 = aesara_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True)\n >>> f2(3, 4, 2)\n 5.0\n\n A function with multiple inputs and multiple outputs:\n\n >>> f3 = aesara_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True)\n >>> f3(2, 3)\n [13.0, -5.0]\n\n See also\n ========\n\n dim_handling\n\n '
if (not aesara):
raise ImportError('Aesara is required for aesara_function')
cache = kwargs.pop('cache', {})
dtypes = kwargs.pop('dtypes', {})
broadcastables = dim_handling(inputs, dim=dim, dims=dims, broadcastables=broadcastables)
code = partial(aesara_code, cache=cache, dtypes=dtypes, broadcastables=broadcastables)
tinputs = list(map(code, inputs))
toutputs = list(map(code, outputs))
toutputs = [(output if isinstance(output, aesara.graph.basic.Variable) else aet.as_tensor_variable(output)) for output in toutputs]
if (len(toutputs) == 1):
toutputs = toutputs[0]
func = aesara.function(tinputs, toutputs, **kwargs)
is_0d = [(len(o.variable.broadcastable) == 0) for o in func.outputs]
if ((not scalar) or (not any(is_0d))):
func.aesara_function = func
return func
def wrapper(*args):
out = func(*args)
if is_sequence(out):
return [(o[()] if is_0d[i] else o) for (i, o) in enumerate(out)]
else:
return out[()]
wrapper.__wrapped__ = func
wrapper.__doc__ = func.__doc__
wrapper.aesara_function = func
return wrapper
|
def aesara_function(inputs, outputs, scalar=False, *, dim=None, dims=None, broadcastables=None, **kwargs):
'\n Create a Aesara function from SymPy expressions.\n\n The inputs and outputs are converted to Aesara variables using\n :func:`.aesara_code` and then passed to ``aesara.function``.\n\n Parameters\n ==========\n\n inputs\n Sequence of symbols which constitute the inputs of the function.\n\n outputs\n Sequence of expressions which constitute the outputs(s) of the\n function. The free symbols of each expression must be a subset of\n ``inputs``.\n\n scalar : bool\n Convert 0-dimensional arrays in output to scalars. This will return a\n Python wrapper function around the Aesara function object.\n\n cache : dict\n Cached Aesara variables (see :class:`AesaraPrinter.cache\n <AesaraPrinter>`). Defaults to the module-level global cache.\n\n dtypes : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n broadcastables : dict\n Passed to :meth:`.AesaraPrinter.doprint`.\n\n dims : dict\n Alternative to ``broadcastables`` argument. Mapping from elements of\n ``inputs`` to integers indicating the dimension of their associated\n arrays/tensors. Overrides ``broadcastables`` argument if given.\n\n dim : int\n Another alternative to the ``broadcastables`` argument. Common number of\n dimensions to use for all arrays/tensors.\n ``aesara_function([x, y], [...], dim=2)`` is equivalent to using\n ``broadcastables={x: (False, False), y: (False, False)}``.\n\n Returns\n =======\n callable\n A callable object which takes values of ``inputs`` as positional\n arguments and returns an output array for each of the expressions\n in ``outputs``. If ``outputs`` is a single expression the function will\n return a Numpy array, if it is a list of multiple expressions the\n function will return a list of arrays. See description of the ``squeeze``\n argument above for the behavior when a single output is passed in a list.\n The returned object will either be an instance of\n ``aesara.compile.function.types.Function`` or a Python wrapper\n function around one. 
In both cases, the returned value will have a\n ``aesara_function`` attribute which points to the return value of\n ``aesara.function``.\n\n Examples\n ========\n\n >>> from sympy.abc import x, y, z\n >>> from sympy.printing.aesaracode import aesara_function\n\n A simple function with one input and one output:\n\n >>> f1 = aesara_function([x], [x**2 - 1], scalar=True)\n >>> f1(3)\n 8.0\n\n A function with multiple inputs and one output:\n\n >>> f2 = aesara_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True)\n >>> f2(3, 4, 2)\n 5.0\n\n A function with multiple inputs and multiple outputs:\n\n >>> f3 = aesara_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True)\n >>> f3(2, 3)\n [13.0, -5.0]\n\n See also\n ========\n\n dim_handling\n\n '
if (not aesara):
raise ImportError('Aesara is required for aesara_function')
cache = kwargs.pop('cache', {})
dtypes = kwargs.pop('dtypes', {})
broadcastables = dim_handling(inputs, dim=dim, dims=dims, broadcastables=broadcastables)
code = partial(aesara_code, cache=cache, dtypes=dtypes, broadcastables=broadcastables)
tinputs = list(map(code, inputs))
toutputs = list(map(code, outputs))
toutputs = [(output if isinstance(output, aesara.graph.basic.Variable) else aet.as_tensor_variable(output)) for output in toutputs]
if (len(toutputs) == 1):
toutputs = toutputs[0]
func = aesara.function(tinputs, toutputs, **kwargs)
is_0d = [(len(o.variable.broadcastable) == 0) for o in func.outputs]
if ((not scalar) or (not any(is_0d))):
func.aesara_function = func
return func
def wrapper(*args):
out = func(*args)
if is_sequence(out):
return [(o[()] if is_0d[i] else o) for (i, o) in enumerate(out)]
else:
return out[()]
wrapper.__wrapped__ = func
wrapper.__doc__ = func.__doc__
wrapper.aesara_function = func
return wrapper<|docstring|>Create a Aesara function from SymPy expressions.
The inputs and outputs are converted to Aesara variables using
:func:`.aesara_code` and then passed to ``aesara.function``.
Parameters
==========
inputs
Sequence of symbols which constitute the inputs of the function.
outputs
Sequence of expressions which constitute the outputs(s) of the
function. The free symbols of each expression must be a subset of
``inputs``.
scalar : bool
Convert 0-dimensional arrays in output to scalars. This will return a
Python wrapper function around the Aesara function object.
cache : dict
Cached Aesara variables (see :class:`AesaraPrinter.cache
<AesaraPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.AesaraPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.AesaraPrinter.doprint`.
dims : dict
Alternative to ``broadcastables`` argument. Mapping from elements of
``inputs`` to integers indicating the dimension of their associated
arrays/tensors. Overrides ``broadcastables`` argument if given.
dim : int
Another alternative to the ``broadcastables`` argument. Common number of
dimensions to use for all arrays/tensors.
``aesara_function([x, y], [...], dim=2)`` is equivalent to using
``broadcastables={x: (False, False), y: (False, False)}``.
Returns
=======
callable
A callable object which takes values of ``inputs`` as positional
arguments and returns an output array for each of the expressions
in ``outputs``. If ``outputs`` is a single expression the function will
return a Numpy array, if it is a list of multiple expressions the
function will return a list of arrays. See description of the ``squeeze``
argument above for the behavior when a single output is passed in a list.
The returned object will either be an instance of
``aesara.compile.function.types.Function`` or a Python wrapper
function around one. In both cases, the returned value will have a
``aesara_function`` attribute which points to the return value of
``aesara.function``.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.printing.aesaracode import aesara_function
A simple function with one input and one output:
>>> f1 = aesara_function([x], [x**2 - 1], scalar=True)
>>> f1(3)
8.0
A function with multiple inputs and one output:
>>> f2 = aesara_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True)
>>> f2(3, 4, 2)
5.0
A function with multiple inputs and multiple outputs:
>>> f3 = aesara_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True)
>>> f3(2, 3)
[13.0, -5.0]
See also
========
dim_handling<|endoftext|>
|
a24e241d2f4ec3634b8818d6925f51828a26387acaf8ecc56ca551b2e840b956
|
def _get_key(self, s, name=None, dtype=None, broadcastable=None):
' Get the cache key for a Sympy object.\n\n Parameters\n ==========\n\n s : sympy.core.basic.Basic\n Sympy object to get key for.\n\n name : str\n Name of object, if it does not have a ``name`` attribute.\n '
if (name is None):
name = s.name
return (name, type(s), s.args, dtype, broadcastable)
|
Get the cache key for a Sympy object.
Parameters
==========
s : sympy.core.basic.Basic
Sympy object to get key for.
name : str
Name of object, if it does not have a ``name`` attribute.
|
_notes/.obsidian/scripts/sympy-1.9/sympy/printing/aesaracode.py
|
_get_key
|
seefrye/waaaaagh-list
| 8,323
|
python
|
def _get_key(self, s, name=None, dtype=None, broadcastable=None):
' Get the cache key for a Sympy object.\n\n Parameters\n ==========\n\n s : sympy.core.basic.Basic\n Sympy object to get key for.\n\n name : str\n Name of object, if it does not have a ``name`` attribute.\n '
if (name is None):
name = s.name
return (name, type(s), s.args, dtype, broadcastable)
|
def _get_key(self, s, name=None, dtype=None, broadcastable=None):
' Get the cache key for a Sympy object.\n\n Parameters\n ==========\n\n s : sympy.core.basic.Basic\n Sympy object to get key for.\n\n name : str\n Name of object, if it does not have a ``name`` attribute.\n '
if (name is None):
name = s.name
return (name, type(s), s.args, dtype, broadcastable)<|docstring|>Get the cache key for a Sympy object.
Parameters
==========
s : sympy.core.basic.Basic
Sympy object to get key for.
name : str
Name of object, if it does not have a ``name`` attribute.<|endoftext|>
|
61baa7f8beb71ebc8e97d48adc66b99de8f0d5fd72d7a39824aeab3e460a0811
|
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):
'\n Get the Aesara variable for a Sympy symbol from the cache, or create it\n if it does not exist.\n '
if (name is None):
name = s.name
if (dtype is None):
dtype = 'floatX'
if (broadcastable is None):
broadcastable = ()
key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)
if (key in self.cache):
return self.cache[key]
value = aet.tensor(name=name, dtype=dtype, broadcastable=broadcastable)
self.cache[key] = value
return value
|
Get the Aesara variable for a Sympy symbol from the cache, or create it
if it does not exist.
|
_notes/.obsidian/scripts/sympy-1.9/sympy/printing/aesaracode.py
|
_get_or_create
|
seefrye/waaaaagh-list
| 8,323
|
python
|
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):
'\n Get the Aesara variable for a Sympy symbol from the cache, or create it\n if it does not exist.\n '
if (name is None):
name = s.name
if (dtype is None):
dtype = 'floatX'
if (broadcastable is None):
broadcastable = ()
key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)
if (key in self.cache):
return self.cache[key]
value = aet.tensor(name=name, dtype=dtype, broadcastable=broadcastable)
self.cache[key] = value
return value
|
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):
'\n Get the Aesara variable for a Sympy symbol from the cache, or create it\n if it does not exist.\n '
if (name is None):
name = s.name
if (dtype is None):
dtype = 'floatX'
if (broadcastable is None):
broadcastable = ()
key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)
if (key in self.cache):
return self.cache[key]
value = aet.tensor(name=name, dtype=dtype, broadcastable=broadcastable)
self.cache[key] = value
return value<|docstring|>Get the Aesara variable for a Sympy symbol from the cache, or create it
if it does not exist.<|endoftext|>
|
6ef95f636829679af6714077eb6d7e4dabde4689b7b499f7eaf2b34d9633d8bd
|
def doprint(self, expr, dtypes=None, broadcastables=None):
" Convert a Sympy expression to a Aesara graph variable.\n\n The ``dtypes`` and ``broadcastables`` arguments are used to specify the\n data type, dimension, and broadcasting behavior of the Aesara variables\n corresponding to the free symbols in ``expr``. Each is a mapping from\n Sympy symbols to the value of the corresponding argument to\n ``aesara.tensor.var.TensorVariable``.\n\n See the corresponding `documentation page`__ for more information on\n broadcasting in Aesara.\n\n .. __: https://aesara.readthedocs.io/en/latest/tutorial/broadcasting.html\n\n Parameters\n ==========\n\n expr : sympy.core.expr.Expr\n Sympy expression to print.\n\n dtypes : dict\n Mapping from Sympy symbols to Aesara datatypes to use when creating\n new Aesara variables for those symbols. Corresponds to the ``dtype``\n argument to ``aesara.tensor.var.TensorVariable``. Defaults to ``'floatX'``\n for symbols not included in the mapping.\n\n broadcastables : dict\n Mapping from Sympy symbols to the value of the ``broadcastable``\n argument to ``aesara.tensor.var.TensorVariable`` to use when creating Aesara\n variables for those symbols. Defaults to the empty tuple for symbols\n not included in the mapping (resulting in a scalar).\n\n Returns\n =======\n\n aesara.graph.basic.Variable\n A variable corresponding to the expression's value in a Aesara\n symbolic expression graph.\n\n "
if (dtypes is None):
dtypes = {}
if (broadcastables is None):
broadcastables = {}
return self._print(expr, dtypes=dtypes, broadcastables=broadcastables)
|
Convert a Sympy expression to a Aesara graph variable.
The ``dtypes`` and ``broadcastables`` arguments are used to specify the
data type, dimension, and broadcasting behavior of the Aesara variables
corresponding to the free symbols in ``expr``. Each is a mapping from
Sympy symbols to the value of the corresponding argument to
``aesara.tensor.var.TensorVariable``.
See the corresponding `documentation page`__ for more information on
broadcasting in Aesara.
.. __: https://aesara.readthedocs.io/en/latest/tutorial/broadcasting.html
Parameters
==========
expr : sympy.core.expr.Expr
Sympy expression to print.
dtypes : dict
Mapping from Sympy symbols to Aesara datatypes to use when creating
new Aesara variables for those symbols. Corresponds to the ``dtype``
argument to ``aesara.tensor.var.TensorVariable``. Defaults to ``'floatX'``
for symbols not included in the mapping.
broadcastables : dict
Mapping from Sympy symbols to the value of the ``broadcastable``
argument to ``aesara.tensor.var.TensorVariable`` to use when creating Aesara
variables for those symbols. Defaults to the empty tuple for symbols
not included in the mapping (resulting in a scalar).
Returns
=======
aesara.graph.basic.Variable
A variable corresponding to the expression's value in a Aesara
symbolic expression graph.
|
_notes/.obsidian/scripts/sympy-1.9/sympy/printing/aesaracode.py
|
doprint
|
seefrye/waaaaagh-list
| 8,323
|
python
|
def doprint(self, expr, dtypes=None, broadcastables=None):
" Convert a Sympy expression to a Aesara graph variable.\n\n The ``dtypes`` and ``broadcastables`` arguments are used to specify the\n data type, dimension, and broadcasting behavior of the Aesara variables\n corresponding to the free symbols in ``expr``. Each is a mapping from\n Sympy symbols to the value of the corresponding argument to\n ``aesara.tensor.var.TensorVariable``.\n\n See the corresponding `documentation page`__ for more information on\n broadcasting in Aesara.\n\n .. __: https://aesara.readthedocs.io/en/latest/tutorial/broadcasting.html\n\n Parameters\n ==========\n\n expr : sympy.core.expr.Expr\n Sympy expression to print.\n\n dtypes : dict\n Mapping from Sympy symbols to Aesara datatypes to use when creating\n new Aesara variables for those symbols. Corresponds to the ``dtype``\n argument to ``aesara.tensor.var.TensorVariable``. Defaults to ``'floatX'``\n for symbols not included in the mapping.\n\n broadcastables : dict\n Mapping from Sympy symbols to the value of the ``broadcastable``\n argument to ``aesara.tensor.var.TensorVariable`` to use when creating Aesara\n variables for those symbols. Defaults to the empty tuple for symbols\n not included in the mapping (resulting in a scalar).\n\n Returns\n =======\n\n aesara.graph.basic.Variable\n A variable corresponding to the expression's value in a Aesara\n symbolic expression graph.\n\n "
if (dtypes is None):
dtypes = {}
if (broadcastables is None):
broadcastables = {}
return self._print(expr, dtypes=dtypes, broadcastables=broadcastables)
|
def doprint(self, expr, dtypes=None, broadcastables=None):
" Convert a Sympy expression to a Aesara graph variable.\n\n The ``dtypes`` and ``broadcastables`` arguments are used to specify the\n data type, dimension, and broadcasting behavior of the Aesara variables\n corresponding to the free symbols in ``expr``. Each is a mapping from\n Sympy symbols to the value of the corresponding argument to\n ``aesara.tensor.var.TensorVariable``.\n\n See the corresponding `documentation page`__ for more information on\n broadcasting in Aesara.\n\n .. __: https://aesara.readthedocs.io/en/latest/tutorial/broadcasting.html\n\n Parameters\n ==========\n\n expr : sympy.core.expr.Expr\n Sympy expression to print.\n\n dtypes : dict\n Mapping from Sympy symbols to Aesara datatypes to use when creating\n new Aesara variables for those symbols. Corresponds to the ``dtype``\n argument to ``aesara.tensor.var.TensorVariable``. Defaults to ``'floatX'``\n for symbols not included in the mapping.\n\n broadcastables : dict\n Mapping from Sympy symbols to the value of the ``broadcastable``\n argument to ``aesara.tensor.var.TensorVariable`` to use when creating Aesara\n variables for those symbols. Defaults to the empty tuple for symbols\n not included in the mapping (resulting in a scalar).\n\n Returns\n =======\n\n aesara.graph.basic.Variable\n A variable corresponding to the expression's value in a Aesara\n symbolic expression graph.\n\n "
if (dtypes is None):
dtypes = {}
if (broadcastables is None):
broadcastables = {}
return self._print(expr, dtypes=dtypes, broadcastables=broadcastables)<|docstring|>Convert a Sympy expression to a Aesara graph variable.
The ``dtypes`` and ``broadcastables`` arguments are used to specify the
data type, dimension, and broadcasting behavior of the Aesara variables
corresponding to the free symbols in ``expr``. Each is a mapping from
Sympy symbols to the value of the corresponding argument to
``aesara.tensor.var.TensorVariable``.
See the corresponding `documentation page`__ for more information on
broadcasting in Aesara.
.. __: https://aesara.readthedocs.io/en/latest/tutorial/broadcasting.html
Parameters
==========
expr : sympy.core.expr.Expr
Sympy expression to print.
dtypes : dict
Mapping from Sympy symbols to Aesara datatypes to use when creating
new Aesara variables for those symbols. Corresponds to the ``dtype``
argument to ``aesara.tensor.var.TensorVariable``. Defaults to ``'floatX'``
for symbols not included in the mapping.
broadcastables : dict
Mapping from Sympy symbols to the value of the ``broadcastable``
argument to ``aesara.tensor.var.TensorVariable`` to use when creating Aesara
variables for those symbols. Defaults to the empty tuple for symbols
not included in the mapping (resulting in a scalar).
Returns
=======
aesara.graph.basic.Variable
A variable corresponding to the expression's value in a Aesara
symbolic expression graph.<|endoftext|>
|
d57edb9d31dd5aa5807f32d67284f20084dd24b856ea6dbffbbe8be1ab29fe56
|
def getNewestPullBranch(self, r, pullBranchSuffix):
'In the given repo there should be several pull branches named line\n <creation-datetime><pullBranchSuffix>. This function returns the newest one\n of those pull branches'
def getNameOfBranch(b):
return b.name
pb = sorted([h for h in r.heads if h.name.endswith(pullBranchSuffix)], key=getNameOfBranch)
b = pb[(- 1)]
return b
|
In the given repo there should be several pull branches named line
<creation-datetime><pullBranchSuffix>. This function returns the newest one
of those pull branches
|
test/test_pull.py
|
getNewestPullBranch
|
box/wavectl
| 18
|
python
|
def getNewestPullBranch(self, r, pullBranchSuffix):
'In the given repo there should be several pull branches named line\n <creation-datetime><pullBranchSuffix>. This function returns the newest one\n of those pull branches'
def getNameOfBranch(b):
return b.name
pb = sorted([h for h in r.heads if h.name.endswith(pullBranchSuffix)], key=getNameOfBranch)
b = pb[(- 1)]
return b
|
def getNewestPullBranch(self, r, pullBranchSuffix):
'In the given repo there should be several pull branches named line\n <creation-datetime><pullBranchSuffix>. This function returns the newest one\n of those pull branches'
def getNameOfBranch(b):
return b.name
pb = sorted([h for h in r.heads if h.name.endswith(pullBranchSuffix)], key=getNameOfBranch)
b = pb[(- 1)]
return b<|docstring|>In the given repo there should be several pull branches named line
<creation-datetime><pullBranchSuffix>. This function returns the newest one
of those pull branches<|endoftext|>
|
48d97ea68fb425f8c7585191ba13734d0a8ac01b73dd61a86491a8d0e09fe666
|
def repoDirHasUntrackedFiles(self, rsrcType, rsrcs):
'The repoDir has some untracked files and the user attempts to do a\n pull.'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
n = 'newFile'
fn = os.path.join(d, n)
with open(fn, 'w') as f:
f.write('Some new modification')
self.executePull(rsrcType, d, r, rsrcs, additionalFileNames=[n], pullAdditionalParams=['--inGit'])
|
The repoDir has some untracked files and the user attempts to do a
pull.
|
test/test_pull.py
|
repoDirHasUntrackedFiles
|
box/wavectl
| 18
|
python
|
def repoDirHasUntrackedFiles(self, rsrcType, rsrcs):
'The repoDir has some untracked files and the user attempts to do a\n pull.'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
n = 'newFile'
fn = os.path.join(d, n)
with open(fn, 'w') as f:
f.write('Some new modification')
self.executePull(rsrcType, d, r, rsrcs, additionalFileNames=[n], pullAdditionalParams=['--inGit'])
|
def repoDirHasUntrackedFiles(self, rsrcType, rsrcs):
'The repoDir has some untracked files and the user attempts to do a\n pull.'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
n = 'newFile'
fn = os.path.join(d, n)
with open(fn, 'w') as f:
f.write('Some new modification')
self.executePull(rsrcType, d, r, rsrcs, additionalFileNames=[n], pullAdditionalParams=['--inGit'])<|docstring|>The repoDir has some untracked files and the user attempts to do a
pull.<|endoftext|>
|
2e27240de494e4aacfa7fced0f29cdca279de0389846cdea1051e0306fdb5e5e
|
def noChangePull(self, rsrcType, rsrcs):
'Between two pull attempts there has not been any change to the\n resources. The second pull should just work even though there is nothing\n to commit'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])
time.sleep(2)
oldRef = r.head.ref
oldCommit = r.head.commit
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])
self.assertEqual(oldRef, r.head.ref)
self.assertEqual(oldCommit, r.head.commit)
|
Between two pull attempts there has not been any change to the
resources. The second pull should just work even though there is nothing
to commit
|
test/test_pull.py
|
noChangePull
|
box/wavectl
| 18
|
python
|
def noChangePull(self, rsrcType, rsrcs):
'Between two pull attempts there has not been any change to the\n resources. The second pull should just work even though there is nothing\n to commit'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])
time.sleep(2)
oldRef = r.head.ref
oldCommit = r.head.commit
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])
self.assertEqual(oldRef, r.head.ref)
self.assertEqual(oldCommit, r.head.commit)
|
def noChangePull(self, rsrcType, rsrcs):
'Between two pull attempts there has not been any change to the\n resources. The second pull should just work even though there is nothing\n to commit'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])
time.sleep(2)
oldRef = r.head.ref
oldCommit = r.head.commit
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])
self.assertEqual(oldRef, r.head.ref)
self.assertEqual(oldCommit, r.head.commit)<|docstring|>Between two pull attempts there has not been any change to the
resources. The second pull should just work even though there is nothing
to commit<|endoftext|>
|
2d76e3956c4111921a4d8a4a99eb6b776b6260cb6f0da958737596dcb5ba42b8
|
def noBranchNameForMergeIntoBranch(self, rsrcType, rsrcs):
'The "noBranchName" is passed as merge-into-branch parameter. We do not\n expect any merge to the master branch to happen. The pull operation\n should only create a pull branch and switch back to the initial branch'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, [], pullAdditionalParams=['--inGit', '--merge-into-branch', 'None'])
npb = self.getNewestPullBranch(r, wavectl.PullCommand.pullBranchSuffix)
npb.checkout()
self.checkFilesInDir(rsrcType, rsrcs, d, additionalFileNames=['README.md'])
self.assertTrue((not r.is_dirty(untracked_files=True)))
|
The "noBranchName" is passed as merge-into-branch parameter. We do not
expect any merge to the master branch to happen. The pull operation
should only create a pull branch and switch back to the initial branch
|
test/test_pull.py
|
noBranchNameForMergeIntoBranch
|
box/wavectl
| 18
|
python
|
def noBranchNameForMergeIntoBranch(self, rsrcType, rsrcs):
'The "noBranchName" is passed as merge-into-branch parameter. We do not\n expect any merge to the master branch to happen. The pull operation\n should only create a pull branch and switch back to the initial branch'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, [], pullAdditionalParams=['--inGit', '--merge-into-branch', 'None'])
npb = self.getNewestPullBranch(r, wavectl.PullCommand.pullBranchSuffix)
npb.checkout()
self.checkFilesInDir(rsrcType, rsrcs, d, additionalFileNames=['README.md'])
self.assertTrue((not r.is_dirty(untracked_files=True)))
|
def noBranchNameForMergeIntoBranch(self, rsrcType, rsrcs):
'The "noBranchName" is passed as merge-into-branch parameter. We do not\n expect any merge to the master branch to happen. The pull operation\n should only create a pull branch and switch back to the initial branch'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, [], pullAdditionalParams=['--inGit', '--merge-into-branch', 'None'])
npb = self.getNewestPullBranch(r, wavectl.PullCommand.pullBranchSuffix)
npb.checkout()
self.checkFilesInDir(rsrcType, rsrcs, d, additionalFileNames=['README.md'])
self.assertTrue((not r.is_dirty(untracked_files=True)))<|docstring|>The "noBranchName" is passed as merge-into-branch parameter. We do not
expect any merge to the master branch to happen. The pull operation
should only create a pull branch and switch back to the initial branch<|endoftext|>
|
d604d3d259b839d1d73c42ef54e02ae661600e0f8e9f2e34a0251549393ad38b
|
def multiPull(self, rsrcType, rsrcs, firstExpectedRsrs, firstAdditionalParams, secondExpectedRsrs, secondAdditionalParams):
'Execute multiple pull operations and make sure all expected resources\n are in the final directory'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, firstExpectedRsrs, pullAdditionalParams=['--inGit'], rsrcAdditionalParams=firstAdditionalParams)
time.sleep(2)
self.executePull(rsrcType, d, r, secondExpectedRsrs, pullAdditionalParams=['--inGit'], rsrcAdditionalParams=secondAdditionalParams)
|
Execute multiple pull operations and make sure all expected resources
are in the final directory
|
test/test_pull.py
|
multiPull
|
box/wavectl
| 18
|
python
|
def multiPull(self, rsrcType, rsrcs, firstExpectedRsrs, firstAdditionalParams, secondExpectedRsrs, secondAdditionalParams):
'Execute multiple pull operations and make sure all expected resources\n are in the final directory'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, firstExpectedRsrs, pullAdditionalParams=['--inGit'], rsrcAdditionalParams=firstAdditionalParams)
time.sleep(2)
self.executePull(rsrcType, d, r, secondExpectedRsrs, pullAdditionalParams=['--inGit'], rsrcAdditionalParams=secondAdditionalParams)
|
def multiPull(self, rsrcType, rsrcs, firstExpectedRsrs, firstAdditionalParams, secondExpectedRsrs, secondAdditionalParams):
'Execute multiple pull operations and make sure all expected resources\n are in the final directory'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.createPullBranch(r, wavectl.PullCommand.datetimeFormat, wavectl.PullCommand.pullBranchSuffix)
self.executePull(rsrcType, d, r, firstExpectedRsrs, pullAdditionalParams=['--inGit'], rsrcAdditionalParams=firstAdditionalParams)
time.sleep(2)
self.executePull(rsrcType, d, r, secondExpectedRsrs, pullAdditionalParams=['--inGit'], rsrcAdditionalParams=secondAdditionalParams)<|docstring|>Execute multiple pull operations and make sure all expected resources
are in the final directory<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.