| body (string, 26–98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (stringclasses, 1 value: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def forward(self, x, timesteps, y=None):
'\n Apply the model to an input batch.\n\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param y: an [N, L] Tensor of texts, if conditional.\n :return: an [N x C x ...] Tensor of outputs.\n '
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if hasattr(self, 'text_encoder'):
y = self.text_encoder(y)
else:
y = None
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb, y=y)
hs.append(h)
h = self.middle_block(h, emb, y=y)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb, y=y)
h = h.type(x.dtype)
return self.out(h)
| 3,838,222,474,808,738,000
|
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param y: an [N, L] Tensor of texts, if conditional.
:return: an [N x C x ...] Tensor of outputs.
|
diff_dalle/unet.py
|
forward
|
AranKomat/Diff-DALLE
|
python
|
def forward(self, x, timesteps, y=None):
'\n Apply the model to an input batch.\n\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param y: an [N, L] Tensor of texts, if conditional.\n :return: an [N x C x ...] Tensor of outputs.\n '
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if hasattr(self, 'text_encoder'):
y = self.text_encoder(y)
else:
y = None
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb, y=y)
hs.append(h)
h = self.middle_block(h, emb, y=y)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb, y=y)
h = h.type(x.dtype)
return self.out(h)
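A minimal usage sketch for this forward pass. The model construction below is hypothetical (the constructor is not shown in this row); tensor shapes follow the docstring.

import torch as th

# model = SomeTextConditionedUNet(...)   # hypothetical; constructor not shown in this row
N, C, H, W = 4, 3, 64, 64
x = th.randn(N, C, H, W)                 # [N x C x ...] input batch
timesteps = th.randint(0, 1000, (N,))    # 1-D batch of timesteps
y = th.randint(0, 50000, (N, 77))        # [N, L] text token ids, if conditional
# out = model(x, timesteps, y=y)         # same [N x C x ...] shape as x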
|
def forward(self, x, timesteps):
'\n Apply the model to an input batch.\n\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :return: an [N x K] Tensor of outputs.\n '
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
h = self.middle_block(h, emb).type(self.dtype)
image_features = self.out(h)
image_features = F.normalize(image_features, dim=(- 1))
return image_features
| 4,133,264,892,193,183,000
|
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
|
diff_dalle/unet.py
|
forward
|
AranKomat/Diff-DALLE
|
python
|
def forward(self, x, timesteps):
'\n Apply the model to an input batch.\n\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :return: an [N x K] Tensor of outputs.\n '
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
h = self.middle_block(h, emb).type(self.dtype)
image_features = self.out(h)
image_features = F.normalize(image_features, dim=(- 1))
return image_features
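The final F.normalize(..., dim=-1) gives each feature vector unit L2 norm, so dot products between features are cosine similarities. A self-contained sketch of that property:

import torch as th
import torch.nn.functional as F

feats = F.normalize(th.randn(4, 512), dim=-1)   # stand-in for the [N x K] output
print(feats.norm(dim=-1))                       # all ones
sim = feats @ feats.t()                         # cosine-similarity matrix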
|
def findTimeColumn(row):
'Dynamically determine which column of a log file contains dates.\n\n Parameters:\n row: A row of a logfile\n Returns:\n iterator: An integer defining the column that contains a valid date\n string.\n '
import dateparser
iterator = 0
for item in row:
if item.isdigit():
iterator += 1
continue
this = dateparser.parse(item)
if this:
return iterator
iterator += 1
return None
| 1,817,709,913,675,201,000
|
Dynamically determine which column of a log file contains dates.
Parameters:
row: A row of a logfile
Returns:
iterator: An integer defining the column that contains a valid date
string.
|
cfltools/depreciated/getuniqueip.py
|
findTimeColumn
|
bradley-evans/cfltools
|
python
|
def findTimeColumn(row):
'Dynamically determine which column of a log file contains dates.\n\n Parameters:\n row: A row of a logfile\n Returns:\n iterator: An integer defining the column that contains a valid date\n string.\n '
import dateparser
iterator = 0
for item in row:
if item.isdigit():
iterator += 1
continue
this = dateparser.parse(item)
if this:
return iterator
iterator += 1
return None
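A quick usage sketch with a hypothetical log row (requires dateparser; its permissive parsing determines exactly which strings count as dates):

row = ['42', 'alice', '2019-03-01 14:22:05', '200']
print(findTimeColumn(row))   # expected: 2, the first column that parses as a date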
|
def scrapeIPs(filename):
'Scrapes all IP addresses from a logfile.\n '
file = open(filename, encoding='utf-8')
logfile_reader = csv.reader(file)
print('Getting the size of the logfile....\n')
logsize = sum((1 for row in logfile_reader))
file.seek(0)
next(logfile_reader)
row = next(logfile_reader)
ip_column = findIpColumn(row)
file.seek(0)
next(logfile_reader)
print((('Processing ' + str(logsize)) + ' entries.'))
iterator = 0
all_ip_address = []
for entry in logfile_reader:
try:
entry_ip_address = entry[ip_column]
all_ip_address.append(entry_ip_address)
iterator = (iterator + 1)
if ((iterator % 1000) == 0):
percentDone = round(Decimal(((iterator / logsize) * 100)), 2)
string = (((((('Currently: Scraping all IPs from file. Entry ' + str(iterator)) + ' of ') + str(logsize)) + ' Percent Done: ') + str(percentDone)) + '%.')
print(string, end='\r')
except IndexError:
# Short rows raise IndexError when the IP column is missing;
# avoid indexing entry again inside the handler.
print((('\n* * * Invalid entry detected on line ' + str(iterator)) + '.'))
iterator = (iterator + 1)
print('Line data: ')
print('Using column {} for IP address.'.format(ip_column))
print(entry)
print('\n')
return all_ip_address
| 7,910,495,692,019,230,000
|
Scrapes all IP addresses from a logfile.
|
cfltools/depreciated/getuniqueip.py
|
scrapeIPs
|
bradley-evans/cfltools
|
python
|
def scrapeIPs(filename):
'\n '
file = open(filename, encoding='utf-8')
logfile_reader = csv.reader(file)
print('Getting the size of the logfile....\n')
logsize = sum((1 for row in logfile_reader))
file.seek(0)
next(logfile_reader)
row = next(logfile_reader)
ip_column = findIpColumn(row)
file.seek(0)
next(logfile_reader)
print((('Processing ' + str(logsize)) + ' entries.'))
iterator = 0
all_ip_address = []
for entry in logfile_reader:
try:
entry_ip_address = entry[ip_column]
all_ip_address.append(entry_ip_address)
iterator = (iterator + 1)
if ((iterator % 1000) == 0):
percentDone = round(Decimal(((iterator / logsize) * 100)), 2)
string = (((((('Currently: Scraping all IPs from file. Entry ' + str(iterator)) + ' of ') + str(logsize)) + ' Percent Done: ') + str(percentDone)) + '%.')
print(string, end='\r')
except IndexError:
# Short rows raise IndexError when the IP column is missing;
# avoid indexing entry again inside the handler.
print((('\n* * * Invalid entry detected on line ' + str(iterator)) + '.'))
iterator = (iterator + 1)
print('Line data: ')
print('Using column {} for IP address.'.format(ip_column))
print(entry)
print('\n')
return all_ip_address
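A usage sketch; note that scrapeIPs also depends on a findIpColumn helper defined elsewhere in this module, so the call is shown commented out:

import csv, os, tempfile

rows = [['id', 'ip', 'time'],
        ['1', '198.51.100.4', '2019-03-01 14:22:05'],
        ['2', '203.0.113.7', '2019-03-01 14:23:41']]
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False, newline='') as f:
    csv.writer(f).writerows(rows)
# ips = scrapeIPs(f.name)   # -> ['198.51.100.4', '203.0.113.7']
os.unlink(f.name)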
|
def getTimerange(filename, unique_ip_address):
"Naive method to determine the time range during which an IP\n address appears in a logfile.\n\n This is sort of hacky. I'm using timestring to process fairly arbitrary\n text input strings for dates from logs, converting those into POSIX\n dates and times, and then comparing that to a simple integer stored\n in the object to establish our range.\n\n Parameters:\n filename: The logfile we are examining in this job.\n unique_ip_address: A list of IpAddress() objects.\n\n Returns:\n unique_ip_address: A list of unique IPAddress()\n objects with dates included.\n "
import csv
import dateparser
print('Determining date/time ranges for each unique IP...')
file = open(filename, 'r', encoding='utf-8')
logfile_reader = csv.reader(file)
next(logfile_reader)
row = next(logfile_reader)
ip_column = findIpColumn(row)
time_column = findTimeColumn(row)
file.seek(0)
next(logfile_reader)
for ip in unique_ip_address:
file.seek(0)
for entry in logfile_reader:
if (ip.ip == entry[ip_column]):
entry_time = dateparser.parse(entry[time_column], settings={'TIMEZONE': 'UTC', 'RETURN_AS_TIMEZONE_AWARE': True}).timestamp()
if (ip.startTime > entry_time):
ip.startTime = entry_time
if (ip.endTime < entry_time):
ip.endTime = entry_time
return unique_ip_address
| -1,948,701,142,690,386,200
|
Naive method to determine the time range during which an IP
address appears in a logfile.
This is sort of hacky. I'm using dateparser to process fairly arbitrary
text input strings for dates from logs, converting those into POSIX
dates and times, and then comparing that to a simple integer stored
in the object to establish our range.
Parameters:
filename: The logfile we are examining in this job.
unique_ip_address: A list of IpAddress() objects.
Returns:
unique_ip_address: A list of unique IPAddress()
objects with dates included.
|
cfltools/depreciated/getuniqueip.py
|
getTimerange
|
bradley-evans/cfltools
|
python
|
def getTimerange(filename, unique_ip_address):
"Naive method to determine the time range during which an IP\n address appears in a logfile.\n\n This is sort of hacky. I'm using timestring to process fairly arbitrary\n text input strings for dates from logs, converting those into POSIX\n dates and times, and then comparing that to a simple integer stored\n in the object to establish our range.\n\n Parameters:\n filename: The logfile we are examining in this job.\n unique_ip_address: A list of IpAddress() objects.\n\n Returns:\n unique_ip_address: A list of unique IPAddress()\n objects with dates included.\n "
import csv
import dateparser
print('Determining date/time ranges for each unique IP...')
file = open(filename, 'r', encoding='utf-8')
logfile_reader = csv.reader(file)
next(logfile_reader)
row = next(logfile_reader)
ip_column = findIpColumn(row)
time_column = findTimeColumn(row)
file.seek(0)
next(logfile_reader)
for ip in unique_ip_address:
file.seek(0)
for entry in logfile_reader:
if (ip.ip == entry[ip_column]):
entry_time = dateparser.parse(entry[time_column], settings={'TIMEZONE': 'UTC', 'RETURN_AS_TIMEZONE_AWARE': True}).timestamp()
if (ip.startTime > entry_time):
ip.startTime = entry_time
if (ip.endTime < entry_time):
ip.endTime = entry_time
return unique_ip_address
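The IpAddress objects are not defined in this excerpt; a minimal stand-in covering the attributes getTimerange touches (ip, startTime, endTime), seeded with sentinel values so any real timestamp narrows the range:

import math

class IpAddress:
    # Hypothetical stand-in; the real class lives elsewhere in cfltools.
    def __init__(self, ip):
        self.ip = ip
        self.startTime = math.inf     # any observed timestamp is smaller
        self.endTime = -math.inf      # any observed timestamp is larger

# unique_ips = getTimerange('access_log.csv', [IpAddress('198.51.100.4')])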
|
def get_bigram_pair_string(self, text):
'\n Return a string of text containing part-of-speech, lemma pairs.\n '
bigram_pairs = []
if (len(text) <= 2):
text_without_punctuation = text.translate(self.punctuation_table)
if (len(text_without_punctuation) >= 1):
text = text_without_punctuation
document = self.nlp(text)
if (len(text) <= 2):
bigram_pairs = [token.lemma_.lower() for token in document]
else:
tokens = [token for token in document if (token.is_alpha and (not token.is_stop))]
if (len(tokens) < 2):
tokens = [token for token in document if token.is_alpha]
for index in range(0, len(tokens)):
bigram_pairs.append('{}:{}'.format(tokens[index].pos_, tokens[index].lemma_.lower()))
if (not bigram_pairs):
bigram_pairs = [token.lemma_.lower() for token in document]
return ' '.join(bigram_pairs)
| -4,480,019,657,429,147,000
|
Return a string of text containing part-of-speech, lemma pairs.
|
tagging.py
|
get_bigram_pair_string
|
sciutrux/cbotami
|
python
|
def get_bigram_pair_string(self, text):
'\n \n '
bigram_pairs = []
if (len(text) <= 2):
text_without_punctuation = text.translate(self.punctuation_table)
if (len(text_without_punctuation) >= 1):
text = text_without_punctuation
document = self.nlp(text)
if (len(text) <= 2):
bigram_pairs = [token.lemma_.lower() for token in document]
else:
tokens = [token for token in document if (token.is_alpha and (not token.is_stop))]
if (len(tokens) < 2):
tokens = [token for token in document if token.is_alpha]
for index in range(0, len(tokens)):
bigram_pairs.append('{}:{}'.format(tokens[index].pos_, tokens[index].lemma_.lower()))
if (not bigram_pairs):
bigram_pairs = [token.lemma_.lower() for token in document]
return ' '.join(bigram_pairs)
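This method assumes self.nlp (a spaCy pipeline) and self.punctuation_table (a str.translate table) were set up elsewhere in the class; a sketch of that wiring under those assumptions:

import string
import spacy

class Tagger:
    def __init__(self):
        self.nlp = spacy.load('en_core_web_sm')   # model name is an assumption
        self.punctuation_table = str.maketrans(dict.fromkeys(string.punctuation))
    # ... get_bigram_pair_string as above ...

# Tagger().get_bigram_pair_string('How is the weather today?')
# -> e.g. 'ADV:how NOUN:weather NOUN:today' (exact tags depend on the model)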
|
def _get_handler(self, scheme):
'Lazy-load the downloadhandler for a scheme\n only on the first request for that scheme.\n 仅在对该协议的第一个请求时才延迟加载该协议的下载处理程序。\n '
if (scheme in self._handlers):
return self._handlers[scheme]
if (scheme in self._notconfigured):
return None
if (scheme not in self._schemes):
self._notconfigured[scheme] = 'no handler available for that scheme'
return None
return self._load_handler(scheme)
| 126,885,426,239,589,500
|
Lazy-load the download handler for a scheme
only on the first request for that scheme.
|
scrapy/core/downloader/handlers/__init__.py
|
_get_handler
|
Hugking/scrapy
|
python
|
def _get_handler(self, scheme):
'Lazy-load the downloadhandler for a scheme\n only on the first request for that scheme.\n 仅在对该协议的第一个请求时才延迟加载该协议的下载处理程序。\n '
if (scheme in self._handlers):
return self._handlers[scheme]
if (scheme in self._notconfigured):
return None
if (scheme not in self._schemes):
self._notconfigured[scheme] = 'no handler available for that scheme'
return None
return self._load_handler(scheme)
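The same memoize-or-mark-unavailable pattern, reduced to a self-contained sketch (names are illustrative, not Scrapy's):

class LazyRegistry:
    def __init__(self, factories):
        self._factories = factories   # scheme -> zero-argument factory
        self._handlers = {}           # successfully built handlers
        self._notconfigured = {}      # schemes that cannot be served, with a reason

    def get(self, scheme):
        if scheme in self._handlers:
            return self._handlers[scheme]
        if scheme in self._notconfigured:
            return None
        if scheme not in self._factories:
            self._notconfigured[scheme] = 'no handler available for that scheme'
            return None
        self._handlers[scheme] = self._factories[scheme]()
        return self._handlers[scheme]

registry = LazyRegistry({'http': dict, 'ftp': list})
print(registry.get('http'))   # {} built on first request, cached afterwards
print(registry.get('s3'))     # None; 's3' is now marked as not configured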
|
def _part_ind_KDTree(self, ptype):
'Find the particles in cells using a KDTree approach.'
parent = getattr(self, 'parent', self.base_object)
units = 'code_length'
pos = np.stack([self[('index', 'x')].to(units), self[('index', 'y')].to(units), self[('index', 'z')].to(units)], axis=1).value
dx = np.stack([self[('index', 'dx')].to(units), self[('index', 'dy')].to(units), self[('index', 'dz')].to(units)], axis=1).value
ppos = np.stack([parent[(ptype, 'particle_position_x')], parent[(ptype, 'particle_position_y')], parent[(ptype, 'particle_position_z')]], axis=1).value
mask = np.zeros(ppos.shape[0], dtype=bool)
levels = self[('index', 'grid_level')].astype('int32').value
if (levels.size == 0):
return mask
levelmin = levels.min()
levelmax = levels.max()
for lvl in range(levelmax, (levelmin - 1), (- 1)):
lvl_mask = (levels == lvl)
dx_loc = dx[lvl_mask]
pos_loc = pos[lvl_mask]
grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1)
(dist, icell) = grid_tree.query(ppos[(~ mask)], distance_upper_bound=dx_loc.max(), p=np.inf)
mask_loc = np.isfinite(dist[:])
i = icell[mask_loc]
dist = np.abs((ppos[(~ mask)][mask_loc, :] - pos_loc[i]))
tmp_mask = np.all((dist <= (dx_loc[i] / 2)), axis=1)
mask_loc[mask_loc] = tmp_mask
mask[(~ mask)] |= mask_loc
return mask
| 1,242,186,821,144,728,600
|
Find the particles in cells using a KDTree approach.
|
yt/data_objects/selection_objects/cut_region.py
|
_part_ind_KDTree
|
chummels/yt
|
python
|
def _part_ind_KDTree(self, ptype):
parent = getattr(self, 'parent', self.base_object)
units = 'code_length'
pos = np.stack([self[('index', 'x')].to(units), self[('index', 'y')].to(units), self[('index', 'z')].to(units)], axis=1).value
dx = np.stack([self[('index', 'dx')].to(units), self[('index', 'dy')].to(units), self[('index', 'dz')].to(units)], axis=1).value
ppos = np.stack([parent[(ptype, 'particle_position_x')], parent[(ptype, 'particle_position_y')], parent[(ptype, 'particle_position_z')]], axis=1).value
mask = np.zeros(ppos.shape[0], dtype=bool)
levels = self[('index', 'grid_level')].astype('int32').value
if (levels.size == 0):
return mask
levelmin = levels.min()
levelmax = levels.max()
for lvl in range(levelmax, (levelmin - 1), (- 1)):
lvl_mask = (levels == lvl)
dx_loc = dx[lvl_mask]
pos_loc = pos[lvl_mask]
grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1)
(dist, icell) = grid_tree.query(ppos[(~ mask)], distance_upper_bound=dx_loc.max(), p=np.inf)
mask_loc = np.isfinite(dist[:])
i = icell[mask_loc]
dist = np.abs((ppos[(~ mask)][mask_loc, :] - pos_loc[i]))
tmp_mask = np.all((dist <= (dx_loc[i] / 2)), axis=1)
mask_loc[mask_loc] = tmp_mask
mask[(~ mask)] |= mask_loc
return mask
|
def _get_bbox(self):
'\n Get the bounding box for the cut region. Here we just use\n the bounding box for the source region.\n '
return self.base_object._get_bbox()
| -3,642,009,883,120,007,000
|
Get the bounding box for the cut region. Here we just use
the bounding box for the source region.
|
yt/data_objects/selection_objects/cut_region.py
|
_get_bbox
|
chummels/yt
|
python
|
def _get_bbox(self):
'\n Get the bounding box for the cut region. Here we just use\n the bounding box for the source region.\n '
return self.base_object._get_bbox()
|
def first(seq, key=(lambda x: bool(x)), default=None, apply=(lambda x: x)):
"Give the first value that satisfies the key test.\n\n Args:\n seq (iterable):\n key (callable): test for each element of iterable\n default: returned when all elements fail test\n apply (callable): applied to element before return, but not to default value\n\n Returns: first element in seq that passes key, mutated with optional apply\n\n Examples:\n >>> first([0, False, None, [], (), 42])\n 42\n >>> first([0, False, None, [], ()]) is None\n True\n >>> first([0, False, None, [], ()], default='ohai')\n 'ohai'\n >>> import re\n >>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])\n >>> m.group(1)\n 'bc'\n\n The optional `key` argument specifies a one-argument predicate function\n like that used for `filter()`. The `key` argument, if supplied, must be\n in keyword form. For example:\n >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)\n 4\n\n "
return next((apply(x) for x in seq if key(x)), (default() if callable(default) else default))
| -1,724,293,131,468,870,000
|
Give the first value that satisfies the key test.
Args:
seq (iterable):
key (callable): test for each element of iterable
default: returned when all elements fail test
apply (callable): applied to element before return, but not to default value
Returns: first element in seq that passes key, mutated with optional apply
Examples:
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
|
lib/python3.7/site-packages/conda/_vendor/auxlib/collection.py
|
first
|
AXGKl/be_black
|
python
|
def first(seq, key=(lambda x: bool(x)), default=None, apply=(lambda x: x)):
"Give the first value that satisfies the key test.\n\n Args:\n seq (iterable):\n key (callable): test for each element of iterable\n default: returned when all elements fail test\n apply (callable): applied to element before return, but not to default value\n\n Returns: first element in seq that passes key, mutated with optional apply\n\n Examples:\n >>> first([0, False, None, [], (), 42])\n 42\n >>> first([0, False, None, [], ()]) is None\n True\n >>> first([0, False, None, [], ()], default='ohai')\n 'ohai'\n >>> import re\n >>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])\n >>> m.group(1)\n 'bc'\n\n The optional `key` argument specifies a one-argument predicate function\n like that used for `filter()`. The `key` argument, if supplied, must be\n in keyword form. For example:\n >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)\n 4\n\n "
return next((apply(x) for x in seq if key(x)), (default() if callable(default) else default))
|
def call_each(seq):
'Calls each element of sequence to invoke the side effect.\n\n Args:\n seq:\n\n Returns: None\n\n '
try:
# Seed reduce with None so the first element is invoked too, instead of
# being consumed untouched as the initial accumulator value.
reduce((lambda _, y: y()), seq, None)
except TypeError as e:
if (text_type(e) != 'reduce() of empty sequence with no initial value'):
raise
| 8,482,218,526,092,047,000
|
Calls each element of sequence to invoke the side effect.
Args:
seq:
Returns: None
|
lib/python3.7/site-packages/conda/_vendor/auxlib/collection.py
|
call_each
|
AXGKl/be_black
|
python
|
def call_each(seq):
'Calls each element of sequence to invoke the side effect.\n\n Args:\n seq:\n\n Returns: None\n\n '
try:
# Seed reduce with None so the first element is invoked too, instead of
# being consumed untouched as the initial accumulator value.
reduce((lambda _, y: y()), seq, None)
except TypeError as e:
if (text_type(e) != 'reduce() of empty sequence with no initial value'):
raise
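A usage sketch, assuming the module-level names this vendored helper expects (functools.reduce and a six-style text_type, which is plain str on Python 3):

from functools import reduce
text_type = str

fired = []
call_each([lambda: fired.append(1), lambda: fired.append(2)])
print(fired)    # [1, 2] with the seeded reduce above
call_each([])   # an empty sequence is a silent no-op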
|
@click.command()
@environment.pass_env
def cli(env):
'List options for creating a placement group.'
manager = PlacementManager(env.client)
routers = manager.get_routers()
env.fout(get_router_table(routers))
rules = manager.get_all_rules()
env.fout(get_rule_table(rules))
| -4,255,090,912,820,993,500
|
List options for creating a placement group.
|
SoftLayer/CLI/virt/placementgroup/create_options.py
|
cli
|
ATGE/softlayer-python
|
python
|
@click.command()
@environment.pass_env
def cli(env):
manager = PlacementManager(env.client)
routers = manager.get_routers()
env.fout(get_router_table(routers))
rules = manager.get_all_rules()
env.fout(get_rule_table(rules))
|
def get_router_table(routers):
'Formats output from _get_routers and returns a table. '
table = formatting.Table(['Datacenter', 'Hostname', 'Backend Router Id'], 'Available Routers')
for router in routers:
datacenter = router['topLevelLocation']['longName']
table.add_row([datacenter, router['hostname'], router['id']])
return table
| -8,719,616,408,360,183,000
|
Formats output from _get_routers and returns a table.
|
SoftLayer/CLI/virt/placementgroup/create_options.py
|
get_router_table
|
ATGE/softlayer-python
|
python
|
def get_router_table(routers):
' '
table = formatting.Table(['Datacenter', 'Hostname', 'Backend Router Id'], 'Available Routers')
for router in routers:
datacenter = router['topLevelLocation']['longName']
table.add_row([datacenter, router['hostname'], router['id']])
return table
|
def get_rule_table(rules):
'Formats output from get_all_rules and returns a table. '
table = formatting.Table(['Id', 'KeyName'], 'Rules')
for rule in rules:
table.add_row([rule['id'], rule['keyName']])
return table
| -5,018,079,132,225,288,000
|
Formats output from get_all_rules and returns a table.
|
SoftLayer/CLI/virt/placementgroup/create_options.py
|
get_rule_table
|
ATGE/softlayer-python
|
python
|
def get_rule_table(rules):
' '
table = formatting.Table(['Id', 'KeyName'], 'Rules')
for rule in rules:
table.add_row([rule['id'], rule['keyName']])
return table
|
def reconstruct(ri, li, rs, v, x, y, phix, phiy):
'\n Takes x, y gradients to the solution to screen mapping potential problem and\n reconstructs the perpendicular deflection fields wBx and wBy.\n\n Args:\n ri (float): Distance from source to plasma (cm).\n li (float): Distance across plasma (cm).\n rs (float): Distance from plasma to screen (cm).\n v (float): Velocity of protons (cm/s).\n x (array): Plasma x-coordinates (cm). \n y (array): Plasma y-coordinates (cm).\n phix (array): Gradient of screen mapping potential in x-direction.\n phiy (array): Gradient of screen mapping potential in y-direction.\n\n Returns:\n wBx, wBy (arrays)\n \n '
magnify = (((rs + ri) + (0.5 * li)) / (ri + (0.5 * li)))
map_pot_x = np.copy(phix)
map_pot_y = np.copy(phiy)
plasma_x = np.copy(x)
plasma_y = np.copy(y)
wBx = ((magnify * (v / rs)) * (map_pot_x - plasma_x))
wBy = ((magnify * (v / rs)) * (map_pot_y - plasma_y))
return (wBx, wBy)
| 2,081,807,718,555,341,300
|
Takes x, y gradients to the solution to screen mapping potential problem and
reconstructs the perpendicular deflection fields wBx and wBy.
Args:
ri (float): Distance from source to plasma (cm).
li (float): Distance across plasma (cm).
rs (float): Distance from plasma to screen (cm).
v (float): Velocity of protons (cm/s).
x (array): Plasma x-coordinates (cm).
y (array): Plasma y-coordinates (cm).
phix (array): Gradient of screen mapping potential in x-direction.
phiy (array): Gradient of screen mapping potential in y-direction.
Returns:
wBx, wBy (arrays)
|
problem/deflect.py
|
reconstruct
|
flash-center/PROBLEM
|
python
|
def reconstruct(ri, li, rs, v, x, y, phix, phiy):
'\n Takes x, y gradients to the solution to screen mapping potential problem and\n reconstructs the perpendicular deflection fields wBx and wBy.\n\n Args:\n ri (float): Distance from source to plasma (cm).\n li (float): Distance across plasma (cm).\n rs (float): Distance from plasma to screen (cm).\n v (float): Velocity of protons (cm/s).\n x (array): Plasma x-coordinates (cm). \n y (array): Plasma y-coordinates (cm).\n phix (array): Gradient of screen mapping potential in x-direction.\n phiy (array): Gradient of screen mapping potential in y-direction.\n\n Returns:\n wBx, wBy (arrays)\n \n '
magnify = (((rs + ri) + (0.5 * li)) / (ri + (0.5 * li)))
map_pot_x = np.copy(phix)
map_pot_y = np.copy(phiy)
plasma_x = np.copy(x)
plasma_y = np.copy(y)
wBx = ((magnify * (v / rs)) * (map_pot_x - plasma_x))
wBy = ((magnify * (v / rs)) * (map_pot_y - plasma_y))
return (wBx, wBy)
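A small numerical sketch in the documented CGS units; every value below is illustrative, not from the source:

import numpy as np

ri, li, rs = 1.0, 0.1, 10.0    # cm: source-to-plasma, plasma depth, plasma-to-screen
v = 2.0e9                      # cm/s, illustrative proton speed
x, y = np.meshgrid(np.linspace(-1, 1, 64), np.linspace(-1, 1, 64), indexing='ij')
phix, phiy = x + 0.01 * y, y   # hypothetical mapping-potential gradients

wBx, wBy = reconstruct(ri, li, rs, v, x, y, phix, phiy)
# magnify = (rs + ri + 0.5*li) / (ri + 0.5*li) ~= 10.52 for these distances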
|
def magpath(wBx, wBy):
'\n Takes the perpendicular deflection field and reconstructs the path\n integrated magnetic field.\n\n Args:\n wBx (array): x-component perpendicular deflection field.\n wBy (array): y-component perpendicular deflection field.\n\n Returns:\n Bxpath (array): Path integrated magnetic field x-component. \n Bypath (array): Path integrated magnetic field y-component.\n '
Bxpath = ((- ((M_PROTON_G * C_CMS) / ESU)) * wBy)
Bypath = (((M_PROTON_G * C_CMS) / ESU) * wBx)
return (Bxpath, Bypath)
| -5,663,707,700,662,836,000
|
Takes the perpendicular deflection field and reconstructs the path
integrated magnetic field.
Args:
wBx (array): x-component perpendicular deflection field.
wBy (array): y-component perpendicular deflection field.
Returns:
Bxpath (array): Path integrated magnetic field x-component.
Bypath (array): Path integrated magnetic field y-component.
|
problem/deflect.py
|
magpath
|
flash-center/PROBLEM
|
python
|
def magpath(wBx, wBy):
'\n Takes the perpendicular deflection field and reconstructs the path\n integrated magnetic field.\n\n Args:\n wBx (array): x-component perpendicular deflection field.\n wBy (array): y-component perpendicular deflection field.\n\n Returns:\n Bxpath (array): Path integrated magnetic field x-component. \n Bypath (array): Path integrated magnetic field y-component.\n '
Bxpath = ((- ((M_PROTON_G * C_CMS) / ESU)) * wBy)
Bypath = (((M_PROTON_G * C_CMS) / ESU) * wBx)
return (Bxpath, Bypath)
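magpath uses module-level CGS constants not shown in this row; standard values that make the snippet self-contained (these are textbook constants, not copied from the source):

M_PROTON_G = 1.6726e-24   # proton mass (g)
C_CMS = 2.9979e10         # speed of light (cm/s)
ESU = 4.8032e-10          # elementary charge (statcoulomb)

# Bxpath, Bypath = magpath(wBx, wBy)   # path-integrated B field components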
|
def fluximage(ri, li, rs, v, x, y, N, wBx, wBy):
'\n Creates a flux image out of a perpendicular deflection field. \n\n Args:\n ri:\n li:\n rs:\n v:\n x (array): Perpendicular deflection field x-coordinates.\n y (array): Perpendicular deflection field y-coordinates.\n wBx (array): Perpendicular deflection field x-component.\n wBy (array): Perpendicular deflection field y-component.\n\n Returns:\n flux_image (array): Generated flux image.\n '
magnify = (((rs + ri) + (0.5 * li)) / (ri + (0.5 * li)))
print('Creating interpolator functions...')
fwBx = sp.interpolate.RegularGridInterpolator((x[:, 0], y[0, :]), wBx, bounds_error=False)
fwBy = sp.interpolate.RegularGridInterpolator((x[:, 0], y[0, :]), wBy, bounds_error=False)
print('DONE')
prot_num = int(np.sqrt(N))
dx = (x[(1, 0)] - x[(0, 0)])
dy = (y[(0, 1)] - y[(0, 0)])
samp_x = np.linspace((x[(0, 0)] + (0.5 * dx)), (x[((- 1), 0)] - (0.5 * dx)), num=prot_num)
samp_y = np.linspace((y[(0, 0)] + (0.5 * dy)), (y[(0, (- 1))] - (0.5 * dy)), num=prot_num)
(samp_x, samp_y) = np.meshgrid(samp_x, samp_y, indexing='ij')
print('Interpolating proton deflections...')
samp_wBx = fwBx((samp_x, samp_y))
samp_wBy = fwBy((samp_x, samp_y))
print('DONE')
screen_x = ((magnify * samp_x) + ((rs / v) * samp_wBx))
screen_y = ((magnify * samp_y) + ((rs / v) * samp_wBy))
print('Histogramming protons...')
flux_image = np.histogram2d(screen_x.ravel(), screen_y.ravel(), bins=x.shape)
print('DONE')
return flux_image[0]
| 9,105,284,645,734,496,000
|
Creates a flux image out of a perpendicular deflection field.
Args:
ri:
li:
rs:
v:
x (array): Perpendicular deflection field x-coordinates.
y (array): Perpendicular deflection field y-coordinates.
wBx (array): Perpendicular deflection field x-component.
wBy (array): Perpendicular deflection field y-component.
Returns:
flux_image (array): Generated flux image.
|
problem/deflect.py
|
fluximage
|
flash-center/PROBLEM
|
python
|
def fluximage(ri, li, rs, v, x, y, N, wBx, wBy):
'\n Creates a flux image out of a perpendicular deflection field. \n\n Args:\n ri:\n li:\n rs:\n v:\n x (array): Perpendicular deflection field x-coordinates.\n y (array): Perpendicular deflection field y-coordinates.\n wBx (array): Perpendicular deflection field x-component.\n wBy (array): Perpendicular deflection field y-component.\n\n Returns:\n flux_image (array): Generated flux image.\n '
magnify = (((rs + ri) + (0.5 * li)) / (ri + (0.5 * li)))
print('Creating interpolator functions...')
fwBx = sp.interpolate.RegularGridInterpolator((x[:, 0], y[0, :]), wBx, bounds_error=False)
fwBy = sp.interpolate.RegularGridInterpolator((x[:, 0], y[0, :]), wBy, bounds_error=False)
print('DONE')
prot_num = int(np.sqrt(N))
dx = (x[(1, 0)] - x[(0, 0)])
dy = (y[(0, 1)] - y[(0, 0)])
samp_x = np.linspace((x[(0, 0)] + (0.5 * dx)), (x[((- 1), 0)] - (0.5 * dx)), num=prot_num)
samp_y = np.linspace((y[(0, 0)] + (0.5 * dy)), (y[(0, (- 1))] - (0.5 * dy)), num=prot_num)
(samp_x, samp_y) = np.meshgrid(samp_x, samp_y, indexing='ij')
print('Interpolating proton deflections...')
samp_wBx = fwBx((samp_x, samp_y))
samp_wBy = fwBy((samp_x, samp_y))
print('DONE')
screen_x = ((magnify * samp_x) + ((rs / v) * samp_wBx))
screen_y = ((magnify * samp_y) + ((rs / v) * samp_wBy))
print('Histogramming protons...')
flux_image = np.histogram2d(screen_x.ravel(), screen_y.ravel(), bins=x.shape)
print('DONE')
return flux_image[0]
|
def fluximage2(x, y, phix, phiy, flux0, scale_fact=1, scale_order=3):
'\n An alternative approach to creating a flux image out of a perpendicular deflection field. \n \n Args:\n x (array): Plasma x-coordinates (cm). \n y (array): Plasma y-coordinates (cm).\n phix (array): Gradient of screen mapping potential in x-direction.\n phiy (array): Gradient of screen mapping potential in y-direction.\n flux0 (array): Initial (undeflected) flux distribution, used as histogram weights.\n scale_fact: Integer factor by which to upscale arrays before analysis; a larger number slows the algorithm but fills out low-flux regions better\n scale_order: Order of the spline interpolation for scipy.ndimage.zoom\n Returns:\n flux_image (array): Generated flux image.\n '
xgv = x[:, 0].flatten()
ygv = y[0, :].flatten()
if (scale_fact != 1):
print('Rescaling...')
xgv = scipy.ndimage.zoom(xgv, scale_fact, order=scale_order)
ygv = scipy.ndimage.zoom(ygv, scale_fact, order=scale_order)
phix = scipy.ndimage.zoom(phix, scale_fact, order=scale_order)
phiy = scipy.ndimage.zoom(phiy, scale_fact, order=scale_order)
flux0 = scipy.ndimage.zoom(flux0, scale_fact, order=scale_order)
dx = np.mean(np.diff(xgv))
dy = np.mean(np.diff(ygv))
x_edges = np.append((xgv - (dx / 2.0)), (xgv[(- 1)] + (dx / 2.0)))
y_edges = np.append((ygv - (dy / 2.0)), (ygv[(- 1)] + (dy / 2.0)))
print('Performing histogram...')
(flux_image, _, _) = np.histogram2d(phix.flatten(), phiy.flatten(), bins=[x_edges, y_edges], weights=flux0.flatten())
if (scale_fact != 1):
print('Descaling...')
flux_image = scipy.misc.imresize(flux_image, (1.0 / scale_fact), mode='F')
print('DONE')
return flux_image
| 2,417,979,036,212,821,500
|
An alternative approach to creating a flux image out of a perpendicular deflection field.
Args:
x (array): Plasma x-coordinates (cm).
y (array): Plasma y-coordinates (cm).
phix (array): Gradient of screen mapping potential in x-direction.
phiy (array): Gradient of screen mapping potential in y-direction.
flux0 (array): Initial (undeflected) flux distribution, used as histogram weights.
scale_fact: Integer factor by which to upscale arrays before analysis; a larger number slows the algorithm but fills out low-flux regions better
scale_order: Order of the spline interpolation for scipy.ndimage.zoom
Returns:
flux_image (array): Generated flux image.
|
problem/deflect.py
|
fluximage2
|
flash-center/PROBLEM
|
python
|
def fluximage2(x, y, phix, phiy, flux0, scale_fact=1, scale_order=3):
'\n An alternative approach to creating a flux image out of a perpendicular deflection field. \n \n Args:\n x (array): Plasma x-coordinates (cm). \n y (array): Plasma y-coordinates (cm).\n phix (array): Gradient of screen mapping potential in x-direction.\n phiy (array): Gradient of screen mapping potential in y-direction.\n flux0 (array): Initial (undeflected) flux distribution, used as histogram weights.\n scale_fact: Integer factor by which to upscale arrays before analysis; a larger number slows the algorithm but fills out low-flux regions better\n scale_order: Order of the spline interpolation for scipy.ndimage.zoom\n Returns:\n flux_image (array): Generated flux image.\n '
xgv = x[:, 0].flatten()
ygv = y[0, :].flatten()
if (scale_fact != 1):
print('Rescaling...')
xgv = scipy.ndimage.zoom(xgv, scale_fact, order=scale_order)
ygv = scipy.ndimage.zoom(ygv, scale_fact, order=scale_order)
phix = scipy.ndimage.zoom(phix, scale_fact, order=scale_order)
phiy = scipy.ndimage.zoom(phiy, scale_fact, order=scale_order)
flux0 = scipy.ndimage.zoom(flux0, scale_fact, order=scale_order)
dx = np.mean(np.diff(xgv))
dy = np.mean(np.diff(ygv))
x_edges = np.append((xgv - (dx / 2.0)), (xgv[(- 1)] + (dx / 2.0)))
y_edges = np.append((ygv - (dy / 2.0)), (ygv[(- 1)] + (dy / 2.0)))
print('Performing histogram...')
(flux_image, _, _) = np.histogram2d(phix.flatten(), phiy.flatten(), bins=[x_edges, y_edges], weights=flux0.flatten())
if (scale_fact != 1):
print('Descaling...')
flux_image = scipy.misc.imresize(flux_image, (1.0 / scale_fact), mode='F')
print('DONE')
return flux_image
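scipy.misc.imresize was deprecated in SciPy 1.0 and removed in SciPy 1.3, so the descaling branch above fails on modern SciPy; one possible substitute uses scipy.ndimage.zoom (a replacement suggestion, not the author's code, and the interpolation is not bit-identical):

import scipy.ndimage

# In place of scipy.misc.imresize(flux_image, 1.0 / scale_fact, mode='F'):
flux_image = scipy.ndimage.zoom(flux_image, 1.0 / scale_fact, order=1)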
|
def fluximage3(ri, li, rs, v, x, y, N, wBx, wBy, Ntest):
'\n A Monte Carlo approach to creating a flux image out of a perpendicular deflection field. \n \n Args:\n ri:\n li:\n rs:\n v:\n N: Number of protons in reality\n x (array): Perpendicular deflection field x-coordinates.\n y (array): Perpendicular deflection field y-coordinates.\n wBx (array): Perpendicular deflection field x-component.\n wBy (array): Perpendicular deflection field y-component.\n Ntest: Number of test protons (Monte Carlo)\n\n Returns:\n flux_image (array): Generated flux image.\n '
magnify = (((rs + li) + ri) / (ri + (0.5 * li)))
xgv = x[:, 0].flatten()
ygv = y[0, :].flatten()
xmin = np.min(xgv)
xmax = np.max(xgv)
ymin = np.min(ygv)
ymax = np.max(ygv)
dx = np.mean(np.diff(xgv))
dy = np.mean(np.diff(ygv))
x_edges = np.append((xgv - (dx / 2.0)), (xgv[(- 1)] + (dx / 2.0)))
y_edges = np.append((ygv - (dy / 2.0)), (ygv[(- 1)] + (dy / 2.0)))
xd = np.random.uniform(xmin, xmax, size=(Ntest,))
yd = np.random.uniform(ymin, ymax, size=(Ntest,))
xyd = np.stack((xd, yd), axis=1)
wBxd = sp.interpolate.interpn((xgv, ygv), wBx, xyd, method='linear')
wByd = sp.interpolate.interpn((xgv, ygv), wBy, xyd, method='linear')
xfd = (xd + ((rs / (magnify * v)) * wBxd))
yfd = (yd + ((rs / (magnify * v)) * wByd))
print('Histogramming reference...')
(flux_ref, _, _) = np.histogram2d(xd, yd, bins=[x_edges, y_edges])
flux_ref = ((flux_ref * N) / Ntest)
print('Histogramming signal...')
(flux_image, _, _) = np.histogram2d(xfd, yfd, bins=[x_edges, y_edges])
flux_image = ((flux_image * N) / Ntest)
print('DONE')
return (flux_image, flux_ref)
| 4,024,405,721,791,568,400
|
A Monte Carlo approach to creating a flux image out of a perpendicular deflection field.
Args:
ri:
li:
rs:
v:
N: Number of protons in reality
x (array): Perpendicular deflection field x-coordinates.
y (array): Perpendicular deflection field y-coordinates.
wBx (array): Perpendicular deflection field x-component.
wBy (array): Perpendicular deflection field y-component.
Ntest: Number of test protons (Monte Carlo)
Returns:
flux_image (array): Generated flux image.
flux_ref (array): Reference flux image built from the undeflected proton positions.
|
problem/deflect.py
|
fluximage3
|
flash-center/PROBLEM
|
python
|
def fluximage3(ri, li, rs, v, x, y, N, wBx, wBy, Ntest):
'\n A Monte Carlo approach to creating a flux image out of a perpendicular deflection field. \n \n Args:\n ri:\n li:\n rs:\n v:\n N: Number of protons in reality\n x (array): Perpendicular deflection field x-coordinates.\n y (array): Perpendicular deflection field y-coordinates.\n wBx (array): Perpendicular deflection field x-component.\n wBy (array): Perpendicular deflection field y-component.\n Ntest: Number of test protons (Monte Carlo)\n\n Returns:\n flux_image (array): Generated flux image.\n '
magnify = (((rs + li) + ri) / (ri + (0.5 * li)))
xgv = x[:, 0].flatten()
ygv = y[0, :].flatten()
xmin = np.min(xgv)
xmax = np.max(xgv)
ymin = np.min(ygv)
ymax = np.max(ygv)
dx = np.mean(np.diff(xgv))
dy = np.mean(np.diff(ygv))
x_edges = np.append((xgv - (dx / 2.0)), (xgv[(- 1)] + (dx / 2.0)))
y_edges = np.append((ygv - (dy / 2.0)), (ygv[(- 1)] + (dy / 2.0)))
xd = np.random.uniform(xmin, xmax, size=(Ntest,))
yd = np.random.uniform(ymin, ymax, size=(Ntest,))
xyd = np.stack((xd, yd), axis=1)
wBxd = sp.interpolate.interpn((xgv, ygv), wBx, xyd, method='linear')
wByd = sp.interpolate.interpn((xgv, ygv), wBy, xyd, method='linear')
xfd = (xd + ((rs / (magnify * v)) * wBxd))
yfd = (yd + ((rs / (magnify * v)) * wByd))
print('Histogramming reference...')
(flux_ref, _, _) = np.histogram2d(xd, yd, bins=[x_edges, y_edges])
flux_ref = ((flux_ref * N) / Ntest)
print('Histogramming signal...')
(flux_image, _, _) = np.histogram2d(xfd, yfd, bins=[x_edges, y_edges])
flux_image = ((flux_image * N) / Ntest)
print('DONE')
return (flux_image, flux_ref)
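A usage sketch chaining in the illustrative ri, li, rs, v, x, y, wBx, wBy values from the reconstruct sketch earlier (Ntest trades Monte Carlo noise for runtime):

N, Ntest = 1.0e8, 200000
flux_image, flux_ref = fluximage3(ri, li, rs, v, x, y, N, wBx, wBy, Ntest)
# flux_ref is the undeflected baseline, so flux_image / flux_ref
# (where flux_ref is nonzero) gives the relative flux on the screen.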
|
def __init__(self, quantizable_layer_type=['Conv2D', 'Linear', 'Conv2DTranspose'], weight_quantize_type='abs_max', activation_quantize_type='moving_average_abs_max', weight_bits=8, activation_bits=8, moving_rate=0.9, weight_preprocess_layer=None, act_preprocess_layer=None, weight_quantize_layer=None, act_quantize_layer=None):
'\n The constructor for ImperativeQuantAware.\n\n Args:\n quantizable_layer_type(list[str | layer]): List the type of\n layers that will be quantized. Default is [\'Conv2D\', \'Linear\', \'Conv2DTranspose\'].\n weight_quantize_type(str): quantization type for weights,\n which supports \'abs_max\' and \'channel_wise_abs_max\'.\n activation_quantize_type(str): quantization type for activations,\n which supports \'abs_max\' and \'moving_average_abs_max\' now.\n If using \'abs_max\' mode, the quantization scale will be\n calculated dynamically each step in both training and testing\n period. If using \'moving_average_abs_max\', the static\n quantization scale will be calculated during training and\n used in inference.\n weight_bits(int): quantization bit number for weights, whereas\n the bias is not quantized.\n activation_bits(int): quantization bit number for activations.\n moving_rate(float): the parameter for \'moving_average_abs_max\'\n quantization.\n weight_preprocess_layer(paddle.nn.Layer, optional): A paddle\n Layer that defines how to preprocess weight before quantization.\n Using this can quickly test if user\'s preprocess method works\n or not. The input is non-quantized weight and function returns\n processed weight to be quantized.\n If None, the weight will be quantized directly.\n Default is None.\n act_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer\n that defines how to preprocess activation before quantization.\n Using this can quickly test if user\'s preprocess method works\n or not. The input is non-quantized activation and function returns\n processed activation to be quantized.\n If None, the activation will be quantized directly.\n Default is None.\n weight_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that\n defines how to quantize weight.\n Using this can quickly test if user\'s quantization method works or not.\n In this layer, user should both define quantization method and\n dequantization method, that is, the function\'s input is non-quantized\n weight and returns dequantized weight.\n If None, will use the quantization op defined by \'weight_quantize_type\'.\n Default is None.\n act_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines\n how to quantize activation.\n Using this can quickly test if user\'s quantization method works or not.\n In this layer, user should both define quantization method and\n dequantization method, that is, the function\'s input is non-quantized\n activation and returns dequantized activation. \n If None, will use the quantization op defined by \'activation_quantize_type\'.\n Default is None.\n\n Note:\n If the user sets the attribute \'skip_quant\' on a Layer that supports dynamic\n quantization and sets it to True, the layer will not be quantized\n during training. If this attribute is not set or is set to False,\n the Layer will be quantized in training.\n\n Examples 1:\n .. code-block:: python\n\n import paddle\n from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n from paddle.vision.models import resnet\n \n model = resnet.resnet50(pretrained=True)\n\n imperative_qat = ImperativeQuantAware(\n weight_quantize_type=\'abs_max\',\n activation_quantize_type=\'moving_average_abs_max\')\n \n # Add the fake quant logic.\n # The original model will be rewritten.\n # The outscale of outputs in supported layers will be calculated.\n imperative_qat.quantize(model)\n\n # Fine-tune the quantized model\n # ...\n \n # Save quant model for the inference.\n imperative_qat.save_quantized_model(\n layer=model,\n model_path="./resnet50_qat",\n input_spec=[\n paddle.static.InputSpec(\n shape=[None, 3, 224, 224], dtype=\'float32\')])\n\n Examples 2:\n .. code-block:: python\n\n import paddle\n from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n\n class ImperativeModel(paddle.nn.Layer):\n def __init__(self):\n super(ImperativeModel, self).__init__()\n # self.linear_0 would skip the quantization.\n self.linear_0 = paddle.nn.Linear(784, 400)\n self.linear_0.skip_quant = True\n\n # self.linear_1 would not skip the quantization.\n self.linear_1 = paddle.nn.Linear(400, 10)\n self.linear_1.skip_quant = False\n\n def forward(self, inputs):\n x = self.linear_0(inputs)\n x = self.linear_1(inputs)\n return x\n\n model = ImperativeModel()\n imperative_qat = ImperativeQuantAware(\n weight_quantize_type=\'abs_max\',\n activation_quantize_type=\'moving_average_abs_max\')\n\n # Add the fake quant logic.\n # The original model will be rewritten.\n #\n # Only one Layer (self.linear_1) will have the\n # fake quant logic added.\n imperative_qat.quantize(model)\n\n # Fine-tune the quantized model\n # ...\n\n # Save quant model for the inference.\n imperative_qat.save_quantized_model(\n layer=model,\n model_path="./imperative_model_qat")\n '
super(ImperativeQuantAware, self).__init__()
kwargs = {'quantizable_layer_type': quantizable_layer_type, 'weight_quantize_type': weight_quantize_type, 'activation_quantize_type': activation_quantize_type, 'weight_bits': weight_bits, 'activation_bits': activation_bits, 'moving_rate': moving_rate, 'weight_preprocess_layer': weight_preprocess_layer, 'act_preprocess_layer': act_preprocess_layer, 'weight_quantize_layer': weight_quantize_layer, 'act_quantize_layer': act_quantize_layer}
self._quantize_inputs = ImperativeQuantizeInputs(**kwargs)
self._quantize_outputs = ImperativeQuantizeOutputs(moving_rate)
| -3,078,167,706,314,112,500
|
The constructor for ImperativeQuantAware.
Args:
quantizable_layer_type(list[str | layer]): List the type of
layers that will be quantized. Default is ['Conv2D', 'Linear', 'Conv2DTranspose'].
weight_quantize_type(str): quantization type for weights,
which supports 'abs_max' and 'channel_wise_abs_max'.
activation_quantize_type(str): quantization type for activations,
which supports 'abs_max' and 'moving_average_abs_max' now.
If using 'abs_max' mode, the quantization scale will be
calculated dynamically each step in both training and testing
period. If using 'moving_average_abs_max', the static
quantization scale will be calculated during training and
used in inference.
weight_bits(int): quantization bit number for weights, whereas
the bias is not quantized.
activation_bits(int): quantization bit number for activations.
moving_rate(float): the parameter for 'moving_average_abs_max'
quantization.
weight_preprocess_layer(paddle.nn.Layer, optional): A paddle
Layer that defines how to preprocess weight before quantization.
Using this can quickly test if user's preprocess method works
or not. The input is non-quantized weight and function returns
processed weight to be quantized.
If None, the weight will be quantized directly.
Default is None.
act_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer
that defines how to preprocess activation before quantization.
Using this can quickly test if user's preprocess method works
or not. The input is non-quantized activation and function returns
processed activation to be quantized.
If None, the activation will be quantized directly.
Default is None.
weight_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that
defines how to quantize weight.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
weight and returns dequantized weight.
If None, will use the quantization op defined by 'weight_quantize_type'.
Default is None.
act_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines
how to quantize activation.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
activation and returns dequantized activation.
If None, will use the quantization op defined by 'activation_quantize_type'.
Default is None.
Note:
If the user sets the attribute 'skip_quant' on a Layer that supports dynamic
quantization and sets it to True, the layer will not be quantized
during training. If this attribute is not set or is set to False,
the Layer will be quantized in training.
Examples 1:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.vision.models import resnet
model = resnet.resnet50(pretrained=True)
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logic.
# The original model will be rewritten.
# The outscale of outputs in supported layers will be calculated.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./resnet50_qat",
input_spec=[
paddle.static.InputSpec(
shape=[None, 3, 224, 224], dtype='float32')])
Examples 2:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super(ImperativeModel, self).__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
def forward(self, inputs):
x = self.linear_0(inputs)
x = self.linear_1(inputs)
return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logic.
# The original model will be rewritten.
#
# Only one Layer (self.linear_1) will have the
# fake quant logic added.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./imperative_model_qat")
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
__init__
|
MissPenguin/Paddle
|
python
|
def __init__(self, quantizable_layer_type=['Conv2D', 'Linear', 'Conv2DTranspose'], weight_quantize_type='abs_max', activation_quantize_type='moving_average_abs_max', weight_bits=8, activation_bits=8, moving_rate=0.9, weight_preprocess_layer=None, act_preprocess_layer=None, weight_quantize_layer=None, act_quantize_layer=None):
'\n The constructor for ImperativeQuantAware.\n\n Args:\n quantizable_layer_type(list[str | layer]): List the type of\n layers that will be quantized. Default is [\'Conv2D\', \'Linear\', \'Conv2DTranspose\'].\n weight_quantize_type(str): quantization type for weights,\n which supports \'abs_max\' and \'channel_wise_abs_max\'.\n activation_quantize_type(str): quantization type for activations,\n which supports \'abs_max\' and \'moving_average_abs_max\' now.\n If using \'abs_max\' mode, the quantization scale will be\n calculated dynamically each step in both training and testing\n period. If using \'moving_average_abs_max\', the static\n quantization scale will be calculated during training and\n used in inference.\n weight_bits(int): quantization bit number for weights, whereas\n the bias is not quantized.\n activation_bits(int): quantization bit number for activations.\n moving_rate(float): the parameter for \'moving_average_abs_max\'\n quantization.\n weight_preprocess_layer(paddle.nn.Layer, optional): A paddle\n Layer that defines how to preprocess weight before quantization.\n Using this can quickly test if user\'s preprocess method works\n or not. The input is non-quantized weight and function returns\n processed weight to be quantized.\n If None, the weight will be quantized directly.\n Default is None.\n act_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer\n that defines how to preprocess activation before quantization.\n Using this can quickly test if user\'s preprocess method works\n or not. The input is non-quantized activation and function returns\n processed activation to be quantized.\n If None, the activation will be quantized directly.\n Default is None.\n weight_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that\n defines how to quantize weight.\n Using this can quickly test if user\'s quantization method works or not.\n In this layer, user should both define quantization method and\n dequantization method, that is, the function\'s input is non-quantized\n weight and returns dequantized weight.\n If None, will use the quantization op defined by \'weight_quantize_type\'.\n Default is None.\n act_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines\n how to quantize activation.\n Using this can quickly test if user\'s quantization method works or not.\n In this layer, user should both define quantization method and\n dequantization method, that is, the function\'s input is non-quantized\n activation and returns dequantized activation. \n If None, will use the quantization op defined by \'activation_quantize_type\'.\n Default is None.\n\n Note:\n If the user sets the attribute \'skip_quant\' on a Layer that supports dynamic\n quantization and sets it to True, the layer will not be quantized\n during training. If this attribute is not set or is set to False,\n the Layer will be quantized in training.\n\n Examples 1:\n .. code-block:: python\n\n import paddle\n from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n from paddle.vision.models import resnet\n \n model = resnet.resnet50(pretrained=True)\n\n imperative_qat = ImperativeQuantAware(\n weight_quantize_type=\'abs_max\',\n activation_quantize_type=\'moving_average_abs_max\')\n \n # Add the fake quant logic.\n # The original model will be rewritten.\n # The outscale of outputs in supported layers will be calculated.\n imperative_qat.quantize(model)\n\n # Fine-tune the quantized model\n # ...\n \n # Save quant model for the inference.\n imperative_qat.save_quantized_model(\n layer=model,\n model_path="./resnet50_qat",\n input_spec=[\n paddle.static.InputSpec(\n shape=[None, 3, 224, 224], dtype=\'float32\')])\n\n Examples 2:\n .. code-block:: python\n\n import paddle\n from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n\n class ImperativeModel(paddle.nn.Layer):\n def __init__(self):\n super(ImperativeModel, self).__init__()\n # self.linear_0 would skip the quantization.\n self.linear_0 = paddle.nn.Linear(784, 400)\n self.linear_0.skip_quant = True\n\n # self.linear_1 would not skip the quantization.\n self.linear_1 = paddle.nn.Linear(400, 10)\n self.linear_1.skip_quant = False\n\n def forward(self, inputs):\n x = self.linear_0(inputs)\n x = self.linear_1(inputs)\n return x\n\n model = ImperativeModel()\n imperative_qat = ImperativeQuantAware(\n weight_quantize_type=\'abs_max\',\n activation_quantize_type=\'moving_average_abs_max\')\n\n # Add the fake quant logic.\n # The original model will be rewritten.\n #\n # Only one Layer (self.linear_1) will have the\n # fake quant logic added.\n imperative_qat.quantize(model)\n\n # Fine-tune the quantized model\n # ...\n\n # Save quant model for the inference.\n imperative_qat.save_quantized_model(\n layer=model,\n model_path="./imperative_model_qat")\n '
super(ImperativeQuantAware, self).__init__()
kwargs = {'quantizable_layer_type': quantizable_layer_type, 'weight_quantize_type': weight_quantize_type, 'activation_quantize_type': activation_quantize_type, 'weight_bits': weight_bits, 'activation_bits': activation_bits, 'moving_rate': moving_rate, 'weight_preprocess_layer': weight_preprocess_layer, 'act_preprocess_layer': act_preprocess_layer, 'weight_quantize_layer': weight_quantize_layer, 'act_quantize_layer': act_quantize_layer}
self._quantize_inputs = ImperativeQuantizeInputs(**kwargs)
self._quantize_outputs = ImperativeQuantizeOutputs(moving_rate)
|
def quantize(self, model):
"\n According to weights' and activations' quantization types,\n the model will be added some fake quant ops, such as\n fake_quantize_dequantize_moving_average_abs_max,\n fake_quantize_dequantize_abs_max and so on. At the same time,\n the out_scale value of outputs would be calculated.\n\n Args:\n model(paddle.nn.Layer): the model to be quantized.\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n\n class ImperativeModel(paddle.nn.Layer):\n def __init__(self):\n super(ImperativeModel, self).__init__()\n # self.linear_0 would skip the quantization.\n self.linear_0 = paddle.nn.Linear(784, 400)\n self.linear_0.skip_quant = True\n\n # self.linear_1 would not skip the quantization.\n self.linear_1 = paddle.nn.Linear(400, 10)\n self.linear_1.skip_quant = False\n\n def forward(self, inputs):\n x = self.linear_0(inputs)\n x = self.linear_1(inputs)\n return x\n\n model = ImperativeModel()\n imperative_qat = ImperativeQuantAware(\n weight_quantize_type='abs_max',\n activation_quantize_type='moving_average_abs_max')\n\n # Add the fake quant logical.\n # The original model will be rewrite.\n #\n # There is only one Layer(self.linear1) would be added the\n # fake quant logical.\n imperative_qat.quantize(model)\n "
assert isinstance(model, dygraph.Layer), 'The model must be an instance of dygraph.Layer.'
self._quantize_inputs.apply(model)
self._quantize_outputs.apply(model)
| 4,487,817,234,187,552,000
|
According to the weights' and activations' quantization types,
fake quant ops such as
fake_quantize_dequantize_moving_average_abs_max and
fake_quantize_dequantize_abs_max will be added to the model.
At the same time, the out_scale values of the outputs will be calculated.
Args:
model(paddle.nn.Layer): the model to be quantized.
Returns:
None
Examples:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super(ImperativeModel, self).__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
def forward(self, inputs):
x = self.linear_0(inputs)
x = self.linear_1(inputs)
return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logic.
# The original model will be rewritten.
#
# Only one Layer (self.linear_1) will have the
# fake quant logic added.
imperative_qat.quantize(model)
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
quantize
|
MissPenguin/Paddle
|
python
|
def quantize(self, model):
"\n According to weights' and activations' quantization types,\n the model will be added some fake quant ops, such as\n fake_quantize_dequantize_moving_average_abs_max,\n fake_quantize_dequantize_abs_max and so on. At the same time,\n the out_scale value of outputs would be calculated.\n\n Args:\n model(paddle.nn.Layer): the model to be quantized.\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n\n class ImperativeModel(paddle.nn.Layer):\n def __init__(self):\n super(ImperativeModel, self).__init__()\n # self.linear_0 would skip the quantization.\n self.linear_0 = paddle.nn.Linear(784, 400)\n self.linear_0.skip_quant = True\n\n # self.linear_1 would not skip the quantization.\n self.linear_1 = paddle.nn.Linear(400, 10)\n self.linear_1.skip_quant = False\n\n def forward(self, inputs):\n x = self.linear_0(inputs)\n x = self.linear_1(inputs)\n return x\n\n model = ImperativeModel()\n imperative_qat = ImperativeQuantAware(\n weight_quantize_type='abs_max',\n activation_quantize_type='moving_average_abs_max')\n\n # Add the fake quant logical.\n # The original model will be rewrite.\n #\n # There is only one Layer(self.linear1) would be added the\n # fake quant logical.\n imperative_qat.quantize(model)\n "
assert isinstance(model, dygraph.Layer), 'The model must be the instance of dygraph.Layer.'
self._quantize_inputs.apply(model)
self._quantize_outputs.apply(model)
|
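A minimal end-to-end sketch of the workflow this method anchors: construct, quantize, train, export. ImperativeModel is assumed to be the example class from the docstring above, the output prefix is hypothetical, and the training loop is elided; treat this as a sketch under those assumptions, not the canonical recipe:

import paddle
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.static import InputSpec

model = ImperativeModel()  # the example Layer defined in the docstring above
imperative_qat = ImperativeQuantAware(
    weight_quantize_type='abs_max',
    activation_quantize_type='moving_average_abs_max')
imperative_qat.quantize(model)  # inserts the fake quant ops in place
# ... run the usual dygraph training loop on `model` here ...
imperative_qat.save_quantized_model(
    model, path='./quant_model/linear',  # hypothetical output prefix
    input_spec=[InputSpec(shape=[None, 784], dtype='float32')])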
def __init__(self, quantizable_layer_type=['Conv2D', 'Linear', 'Conv2DTranspose'], weight_quantize_type='abs_max', activation_quantize_type='moving_average_abs_max', weight_bits=8, activation_bits=8, moving_rate=0.9, weight_preprocess_layer=None, act_preprocess_layer=None, weight_quantize_layer=None, act_quantize_layer=None):
'\n The constructor for ImperativeQuantizeInputs. \n\n Please refer to the args of ImperativeQuantAware.\n '
super(ImperativeQuantizeInputs, self).__init__()
self._quantizable_layer_type = tuple(((utils.layer_name_map[layer] if (layer in utils.layer_name_map) else layer) for layer in quantizable_layer_type))
for layer in self._quantizable_layer_type:
assert ((not isinstance(layer, str)) and (layer in utils.fake_quant_input_layers)), ('%s is not supported for quantization.' % layer)
quantize_type = {'abs_max', 'moving_average_abs_max', 'channel_wise_abs_max'}
assert ((weight_quantize_type != 'moving_average_abs_max') and (weight_quantize_type in quantize_type)), ('Unsupported weight_quantize_type: %s. It can only be abs_max or channel_wise_abs_max.' % weight_quantize_type)
assert (activation_quantize_type == 'moving_average_abs_max'), ('Unsupported activation_quantize_type: %s. It can only be moving_average_abs_max now.' % activation_quantize_type)
bits_check = (lambda bits: (isinstance(bits, int) and (bits >= 1) and (bits <= 16)))
assert bits_check(weight_bits), 'weight_bits should be 1, 2,... or 16.'
assert bits_check(activation_bits), 'activation_bits should be 1, 2,... or 16.'
layer_check = (lambda method: ((method is None) or issubclass(method, dygraph.layers.Layer)))
assert layer_check(weight_preprocess_layer), 'weight_preprocess should be nn.Layer.'
assert layer_check(act_preprocess_layer), 'act_preprocess should be nn.Layer.'
assert layer_check(weight_quantize_layer), 'weight_quantize should be nn.Layer.'
assert layer_check(act_quantize_layer), 'act_quantize should be nn.Layer.'
self._kwargs = {'weight_quantize_type': weight_quantize_type, 'activation_quantize_type': activation_quantize_type, 'weight_bits': weight_bits, 'activation_bits': activation_bits, 'moving_rate': moving_rate, 'weight_pre_layer': weight_preprocess_layer, 'act_pre_layer': act_preprocess_layer, 'weight_quant_layer': weight_quantize_layer, 'act_quant_layer': act_quantize_layer}
| -7,740,397,867,562,542,000
|
The constructor for ImperativeQuantizeInputs.
Please refer to the args of ImperativeQuantAware.
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
__init__
|
MissPenguin/Paddle
|
python
|
def __init__(self, quantizable_layer_type=['Conv2D', 'Linear', 'Conv2DTranspose'], weight_quantize_type='abs_max', activation_quantize_type='moving_average_abs_max', weight_bits=8, activation_bits=8, moving_rate=0.9, weight_preprocess_layer=None, act_preprocess_layer=None, weight_quantize_layer=None, act_quantize_layer=None):
'\n The constructor for ImperativeQuantizeInputs. \n\n Please refer to the args of ImperativeQuantAware.\n '
super(ImperativeQuantizeInputs, self).__init__()
self._quantizable_layer_type = tuple(((utils.layer_name_map[layer] if (layer in utils.layer_name_map) else layer) for layer in quantizable_layer_type))
for layer in self._quantizable_layer_type:
assert ((not isinstance(layer, str)) and (layer in utils.fake_quant_input_layers)), ('%s is not supported for quantization.' % layer)
quantize_type = {'abs_max', 'moving_average_abs_max', 'channel_wise_abs_max'}
assert ((weight_quantize_type != 'moving_average_abs_max') and (weight_quantize_type in quantize_type)), ('Unsupported weight_quantize_type: %s. It can only be abs_max or channel_wise_abs_max.' % weight_quantize_type)
assert (activation_quantize_type == 'moving_average_abs_max'), ('Unsupported activation_quantize_type: %s. It can only be moving_average_abs_max now.' % activation_quantize_type)
bits_check = (lambda bits: (isinstance(bits, int) and (bits >= 1) and (bits <= 16)))
assert bits_check(weight_bits), 'weight_bits should be 1, 2,... or 16.'
assert bits_check(activation_bits), 'activation_bits should be 1, 2,... or 16.'
layer_check = (lambda method: ((method is None) or issubclass(method, dygraph.layers.Layer)))
assert layer_check(weight_preprocess_layer), 'weight_preprocess should be nn.Layer.'
assert layer_check(act_preprocess_layer), 'act_preprocess should be nn.Layer.'
assert layer_check(weight_quantize_layer), 'weight_quantize should be nn.Layer.'
assert layer_check(act_quantize_layer), 'act_quantize should be nn.Layer.'
self._kwargs = {'weight_quantize_type': weight_quantize_type, 'activation_quantize_type': activation_quantize_type, 'weight_bits': weight_bits, 'activation_bits': activation_bits, 'moving_rate': moving_rate, 'weight_pre_layer': weight_preprocess_layer, 'act_pre_layer': act_preprocess_layer, 'weight_quant_layer': weight_quantize_layer, 'act_quant_layer': act_quantize_layer}
|
def apply(self, model):
'\n        Quantize the weights and activations for the specified\n        layers.\n\n        Args:\n            model(paddle.nn.Layer): The target model for which the\n                input quantization scales will be calculated.\n\n        Returns:\n            None\n        '
assert isinstance(model, dygraph.Layer), 'The model must be the instance of dygraph.Layer.'
for (name, cur_layer) in model.named_sublayers():
if ((not isinstance(cur_layer, self._quantizable_layer_type)) or (hasattr(cur_layer, 'skip_quant') and (cur_layer.skip_quant == True))):
continue
(parent_layer, sub_name) = utils.find_parent_layer_and_sub_name(model, name)
cur_quant_layer = self._get_input_quantized_layer(cur_layer)
setattr(parent_layer, sub_name, cur_quant_layer)
| 958,465,973,222,111,000
|
Quantize the weights and activations for the specified
layers.
Args:
model(paddle.nn.Layer): The target model for which the
input quantization scales will be calculated.
Returns:
None
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
apply
|
MissPenguin/Paddle
|
python
|
def apply(self, model):
'\n        Quantize the weights and activations for the specified\n        layers.\n\n        Args:\n            model(paddle.nn.Layer): The target model for which the\n                input quantization scales will be calculated.\n\n        Returns:\n            None\n        '
assert isinstance(model, dygraph.Layer), 'The model must be the instance of dygraph.Layer.'
for (name, cur_layer) in model.named_sublayers():
if ((not isinstance(cur_layer, self._quantizable_layer_type)) or (hasattr(cur_layer, 'skip_quant') and (cur_layer.skip_quant == True))):
continue
(parent_layer, sub_name) = utils.find_parent_layer_and_sub_name(model, name)
cur_quant_layer = self._get_input_quantized_layer(cur_layer)
setattr(parent_layer, sub_name, cur_quant_layer)
|
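The substitution step above — resolve the parent of each matching sublayer, then setattr the quantized wrapper under the same attribute name — is plain attribute surgery. A self-contained sketch of the same pattern, with a hypothetical replace_sublayer standing in for utils.find_parent_layer_and_sub_name plus setattr:

class Block:
    def __init__(self):
        self.linear = 'fp32-linear'

class Net:
    def __init__(self):
        self.block = Block()

def replace_sublayer(root, dotted_name, new_layer):
    *head, sub_name = dotted_name.split('.')
    parent = root
    for part in head:                     # walk down to the parent layer
        parent = getattr(parent, part)
    setattr(parent, sub_name, new_layer)  # swap in the quantized wrapper

net = Net()
replace_sublayer(net, 'block.linear', 'quantized-linear')
print(net.block.linear)  # quantized-linear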
def __init__(self, moving_rate=0.9):
'\n The constructor for ImperativeQuantizeOutputs.\n\n Args:\n moving_rate(float): The decay coefficient of moving average.\n The default value is 0.9.\n '
super(ImperativeQuantizeOutputs, self).__init__()
self._moving_rate = moving_rate
| 7,053,021,617,716,009,000
|
The constructor for ImperativeQuantizeOutputs.
Args:
moving_rate(float): The decay coefficient of moving average.
The default value is 0.9.
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
__init__
|
MissPenguin/Paddle
|
python
|
def __init__(self, moving_rate=0.9):
'\n The constructor for ImperativeQuantizeOutputs.\n\n Args:\n moving_rate(float): The decay coefficient of moving average.\n The default value is 0.9.\n '
super(ImperativeQuantizeOutputs, self).__init__()
self._moving_rate = moving_rate
|
def apply(self, model):
'\n        Insert the `moving_average_abs_max_scale` layers to calculate the\n        output scales for specific layers in the dygraph model.\n\n        Args:\n            model(paddle.nn.Layer): The target model for which the\n                output quantization scales will be calculated.\n\n        Returns:\n            None\n        '
assert isinstance(model, dygraph.Layer), 'The model must be the instance of dygraph.Layer.'
for (cur_name, cur_layer) in model.named_sublayers():
if ('_act_preprocess' in cur_name):
continue
if (not self._is_target_layer(cur_layer)):
continue
(parent_layer, sub_name) = utils.find_parent_layer_and_sub_name(model, cur_name)
if isinstance(cur_layer, tuple(utils.fake_quant_output_layers)):
cur_quant_layer = quant_layers.FakeQuantMAOutputScaleLayer(cur_layer, self._moving_rate)
else:
cur_quant_layer = quant_layers.MAOutputScaleLayer(cur_layer, self._moving_rate)
setattr(parent_layer, sub_name, cur_quant_layer)
| -2,443,531,186,074,505,700
|
Insert the `moving_average_abs_max_scale` layers to calculate the
output scales for specific layers in the dygraph model.
Args:
model(paddle.nn.Layer): The target model for which the
output quantization scales will be calculated.
Returns:
None
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
apply
|
MissPenguin/Paddle
|
python
|
def apply(self, model):
'\n        Insert the `moving_average_abs_max_scale` layers to calculate the\n        output scales for specific layers in the dygraph model.\n\n        Args:\n            model(paddle.nn.Layer): The target model for which the\n                output quantization scales will be calculated.\n\n        Returns:\n            None\n        '
assert isinstance(model, dygraph.Layer), 'The model must be the instance of dygraph.Layer.'
for (cur_name, cur_layer) in model.named_sublayers():
if ('_act_preprocess' in cur_name):
continue
if (not self._is_target_layer(cur_layer)):
continue
(parent_layer, sub_name) = utils.find_parent_layer_and_sub_name(model, cur_name)
if isinstance(cur_layer, tuple(utils.fake_quant_output_layers)):
cur_quant_layer = quant_layers.FakeQuantMAOutputScaleLayer(cur_layer, self._moving_rate)
else:
cur_quant_layer = quant_layers.MAOutputScaleLayer(cur_layer, self._moving_rate)
setattr(parent_layer, sub_name, cur_quant_layer)
|
def save_quantized_model(self, model, path, input_spec=None, **config):
"\n Save the quantized model for the inference.\n\n Args:\n model (Layer): The model to be saved.\n path (str): The path prefix to save model. The format is \n ``dirname/file_prefix`` or ``file_prefix``.\n input_spec (list[InputSpec|Tensor], optional): Describes the input\n of the saved model's forward method, which can be described by\n InputSpec or example Tensor. If None, all input variables of \n the original Layer's forward method would be the inputs of\n the saved model. Default None.\n **configs (dict, optional): Other save configuration options for\n compatibility. We do not recommend using these configurations,\n they may be removed in the future. If not necessary, DO NOT use\n them. Default None.\n The following options are currently supported:\n (1) output_spec (list[Tensor]): Selects the output targets of\n the saved model. By default, all return variables of original\n Layer's forward method are kept as the output of the saved model.\n If the provided ``output_spec`` list is not all output variables, \n the saved model will be pruned according to the given\n ``output_spec`` list. \n\n Returns:\n None\n "
assert isinstance(model, dygraph.Layer), 'The model must be the instance of dygraph.Layer.'
paddle.jit.save(layer=model, path=path, input_spec=input_spec, **config)
is_dynamic_mode = False
if paddle.in_dynamic_mode():
is_dynamic_mode = True
paddle.enable_static()
place = core.CPUPlace()
scope = global_scope()
exe = Executor(place)
dirname = os.path.dirname(path)
basename = os.path.basename(path)
model_filename = (basename + INFER_MODEL_SUFFIX)
params_filename = (basename + INFER_PARAMS_SUFFIX)
[infer_program, feed_target_names, fetch_targets] = load_inference_model(dirname=dirname, executor=exe, model_filename=model_filename, params_filename=params_filename)
self._gather_scales(infer_program, scope)
self._set_skip_quant_attr(infer_program)
save_inference_model(dirname=dirname, feeded_var_names=feed_target_names, target_vars=fetch_targets, executor=exe, main_program=infer_program.clone(), model_filename=model_filename, params_filename=params_filename)
if is_dynamic_mode:
paddle.disable_static()
| -5,138,356,119,441,218,000
|
Save the quantized model for the inference.
Args:
model (Layer): The model to be saved.
path (str): The path prefix to save model. The format is
``dirname/file_prefix`` or ``file_prefix``.
input_spec (list[InputSpec|Tensor], optional): Describes the input
of the saved model's forward method, which can be described by
InputSpec or example Tensor. If None, all input variables of
the original Layer's forward method would be the inputs of
the saved model. Default None.
**configs (dict, optional): Other save configuration options for
compatibility. We do not recommend using these configurations,
they may be removed in the future. If not necessary, DO NOT use
them. Default None.
The following options are currently supported:
(1) output_spec (list[Tensor]): Selects the output targets of
the saved model. By default, all return variables of original
Layer's forward method are kept as the output of the saved model.
If the provided ``output_spec`` list is not all output variables,
the saved model will be pruned according to the given
``output_spec`` list.
Returns:
None
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
save_quantized_model
|
MissPenguin/Paddle
|
python
|
def save_quantized_model(self, model, path, input_spec=None, **config):
"\n Save the quantized model for the inference.\n\n Args:\n model (Layer): The model to be saved.\n path (str): The path prefix to save model. The format is \n ``dirname/file_prefix`` or ``file_prefix``.\n input_spec (list[InputSpec|Tensor], optional): Describes the input\n of the saved model's forward method, which can be described by\n InputSpec or example Tensor. If None, all input variables of \n the original Layer's forward method would be the inputs of\n the saved model. Default None.\n **configs (dict, optional): Other save configuration options for\n compatibility. We do not recommend using these configurations,\n they may be removed in the future. If not necessary, DO NOT use\n them. Default None.\n The following options are currently supported:\n (1) output_spec (list[Tensor]): Selects the output targets of\n the saved model. By default, all return variables of original\n Layer's forward method are kept as the output of the saved model.\n If the provided ``output_spec`` list is not all output variables, \n the saved model will be pruned according to the given\n ``output_spec`` list. \n\n Returns:\n None\n "
assert isinstance(model, dygraph.Layer), 'The model must be the instance of dygraph.Layer.'
paddle.jit.save(layer=model, path=path, input_spec=input_spec, **config)
is_dynamic_mode = False
if paddle.in_dynamic_mode():
is_dynamic_mode = True
paddle.enable_static()
place = core.CPUPlace()
scope = global_scope()
exe = Executor(place)
dirname = os.path.dirname(path)
basename = os.path.basename(path)
model_filename = (basename + INFER_MODEL_SUFFIX)
params_filename = (basename + INFER_PARAMS_SUFFIX)
[infer_program, feed_target_names, fetch_targets] = load_inference_model(dirname=dirname, executor=exe, model_filename=model_filename, params_filename=params_filename)
self._gather_scales(infer_program, scope)
self._set_skip_quant_attr(infer_program)
save_inference_model(dirname=dirname, feeded_var_names=feed_target_names, target_vars=fetch_targets, executor=exe, main_program=infer_program.clone(), model_filename=model_filename, params_filename=params_filename)
if is_dynamic_mode:
paddle.disable_static()
|
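The path handling above splits ``path`` into a directory and a file prefix, then appends the inference suffixes. A quick standalone sketch of the same logic; the suffix values here are assumptions matching common Paddle conventions, while the real constants are imported in qat.py:

import os

INFER_MODEL_SUFFIX = '.pdmodel'      # assumed value
INFER_PARAMS_SUFFIX = '.pdiparams'   # assumed value
path = './output/mnist_quant'
dirname, basename = os.path.dirname(path), os.path.basename(path)
print(dirname)                        # ./output
print(basename + INFER_MODEL_SUFFIX)  # mnist_quant.pdmodel
print(basename + INFER_PARAMS_SUFFIX) # mnist_quant.pdiparams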
def _is_target_layer(self, layer):
'\n Whether the layer needs to calculate output scales.\n '
flag = False
if isinstance(layer, dygraph.Layer):
if (utils.is_leaf_layer(layer) and (not isinstance(layer, tuple(utils.fake_quant_leaf_layers)))):
flag = True
if isinstance(layer, tuple(utils.fake_quant_wrap_layers)):
flag = True
if isinstance(layer, paddle.nn.quant.FloatFunctionalLayer):
flag = True
return flag
| 8,147,025,312,163,031,000
|
Whether the layer needs to calculate output scales.
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
_is_target_layer
|
MissPenguin/Paddle
|
python
|
def _is_target_layer(self, layer):
'\n \n '
flag = False
if isinstance(layer, dygraph.Layer):
if (utils.is_leaf_layer(layer) and (not isinstance(layer, tuple(utils.fake_quant_leaf_layers)))):
flag = True
if isinstance(layer, tuple(utils.fake_quant_wrap_layers)):
flag = True
if isinstance(layer, paddle.nn.quant.FloatFunctionalLayer):
flag = True
return flag
|
def _gather_scales(self, program, scope):
'\n Get all scales from fake ops, save them into the corresponding ops\n and delete all moving_average_abs_max_scale ops. \n '
def _gather_input_scale():
target_ops = []
skip_ops = (utils.fake_quantize_dequantize_op_types + ['moving_average_abs_max_scale'])
for block in program.blocks:
for op in block.ops:
if (op.type not in skip_ops):
target_ops.append(op)
for op in target_ops:
for in_var_name in utils._get_op_input_var_names(op):
previous_op = utils.find_previous_op(op.block, in_var_name)
if ((previous_op is not None) and (('quantize_dequantize' in previous_op.type) or (previous_op.type == 'moving_average_abs_max_scale'))):
scale_name = previous_op.output('OutScale')[0]
in_scale = utils.load_variable_data(scope, scale_name)
in_scale = utils.fp_numpy_to_naive(in_scale)
(argname, index) = utils._get_input_name_index(op, in_var_name)
op._set_attr(((argname + str(index)) + '_threshold'), in_scale)
def _gather_output_scale():
target_ops = []
for block in program.blocks:
for op in block.ops:
if (op.type == 'moving_average_abs_max_scale'):
target_ops.append(op)
for op in target_ops:
in_var_name = op.input('X')[0]
out_var_name = op.output('Out')[0]
block = op.block
previous_op = utils.find_previous_op(block, in_var_name)
next_ops = utils.find_next_ops(block, out_var_name)
out_scale_name = op.output('OutScale')[0]
out_scale = utils.load_variable_data(scope, out_scale_name)
out_scale = utils.fp_numpy_to_naive(out_scale)
if (previous_op.type != 'feed'):
(argname, index) = utils._get_output_name_index(previous_op, in_var_name)
previous_op._set_attr(((argname + str(index)) + '_threshold'), out_scale)
previous_op._set_attr('out_threshold', out_scale)
for next_op in next_ops:
next_op._rename_input(out_var_name, in_var_name)
_gather_input_scale()
_gather_output_scale()
| 4,573,290,824,300,222,000
|
Get all scales from fake ops, save them into the corresponding ops
and delete all moving_average_abs_max_scale ops.
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
_gather_scales
|
MissPenguin/Paddle
|
python
|
def _gather_scales(self, program, scope):
'\n Get all scales from fake ops, save them into the corresponding ops\n and delete all moving_average_abs_max_scale ops. \n '
def _gather_input_scale():
target_ops = []
skip_ops = (utils.fake_quantize_dequantize_op_types + ['moving_average_abs_max_scale'])
for block in program.blocks:
for op in block.ops:
if (op.type not in skip_ops):
target_ops.append(op)
for op in target_ops:
for in_var_name in utils._get_op_input_var_names(op):
previous_op = utils.find_previous_op(op.block, in_var_name)
if ((previous_op is not None) and (('quantize_dequantize' in previous_op.type) or (previous_op.type == 'moving_average_abs_max_scale'))):
scale_name = previous_op.output('OutScale')[0]
in_scale = utils.load_variable_data(scope, scale_name)
in_scale = utils.fp_numpy_to_naive(in_scale)
(argname, index) = utils._get_input_name_index(op, in_var_name)
op._set_attr(((argname + str(index)) + '_threshold'), in_scale)
def _gather_output_scale():
target_ops = []
for block in program.blocks:
for op in block.ops:
if (op.type == 'moving_average_abs_max_scale'):
target_ops.append(op)
for op in target_ops:
in_var_name = op.input('X')[0]
out_var_name = op.output('Out')[0]
block = op.block
previous_op = utils.find_previous_op(block, in_var_name)
next_ops = utils.find_next_ops(block, out_var_name)
out_scale_name = op.output('OutScale')[0]
out_scale = utils.load_variable_data(scope, out_scale_name)
out_scale = utils.fp_numpy_to_naive(out_scale)
if (previous_op.type != 'feed'):
(argname, index) = utils._get_output_name_index(previous_op, in_var_name)
previous_op._set_attr(((argname + str(index)) + '_threshold'), out_scale)
previous_op._set_attr('out_threshold', out_scale)
for next_op in next_ops:
next_op._rename_input(out_var_name, in_var_name)
_gather_input_scale()
_gather_output_scale()
|
def _set_skip_quant_attr(self, program):
'\n Label the skip quantized ops.\n '
for block in program.blocks:
for op in block.ops:
if self._is_skip_quant_op(block, op):
op._set_attr('skip_quant', True)
| 1,421,227,798,379,409,000
|
Label the skip quantized ops.
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
_set_skip_quant_attr
|
MissPenguin/Paddle
|
python
|
def _set_skip_quant_attr(self, program):
'\n \n '
for block in program.blocks:
for op in block.ops:
if self._is_skip_quant_op(block, op):
op._set_attr('skip_quant', True)
|
def _is_skip_quant_op(self, block, in_op):
'\n        Whether quantization should be skipped for the input op:\n        1. the type of the input op should be conv2d, depthwise_conv2d, matmul or conv2d_transpose\n        2. the previous ops of the input op are not fake_quantize_dequantize ops\n        '
target_op_types = ['conv2d', 'depthwise_conv2d', 'matmul', 'conv2d_transpose']
if (in_op.type not in target_op_types):
return False
previous_ops = [utils.find_previous_op(block, arg_name) for arg_name in in_op.input_arg_names]
return any((((op is not None) and (op.type not in utils.fake_quantize_dequantize_op_types)) for op in previous_ops))
| -4,782,836,982,770,918,000
|
Whether quantization should be skipped for the input op:
1. the type of the input op should be conv2d, depthwise_conv2d, matmul or conv2d_transpose
2. the previous ops of the input op are not fake_quantize_dequantize ops
|
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
|
_is_skip_quant_op
|
MissPenguin/Paddle
|
python
|
def _is_skip_quant_op(self, block, in_op):
'\n        Whether quantization should be skipped for the input op:\n        1. the type of the input op should be conv2d, depthwise_conv2d, matmul or conv2d_transpose\n        2. the previous ops of the input op are not fake_quantize_dequantize ops\n        '
target_op_types = ['conv2d', 'depthwise_conv2d', 'matmul', 'conv2d_transpose']
if (in_op.type not in target_op_types):
return False
previous_ops = [utils.find_previous_op(block, arg_name) for arg_name in in_op.input_arg_names]
return any((((op is not None) and (op.type not in utils.fake_quantize_dequantize_op_types)) for op in previous_ops))
|
def pascal_row(self, n):
" Returns n-th row of Pascal's triangle\n "
result = [1]
(x, numerator) = (1, n)
for denominator in range(1, ((n // 2) + 1)):
x *= numerator
x /= denominator
result.append(x)
numerator -= 1
if ((n & 1) == 0):
result.extend(reversed(result[:(- 1)]))
else:
result.extend(reversed(result))
return result
| -7,095,657,236,191,123,000
|
Returns n-th row of Pascal's triangle
|
info/utils/captcha/captcha.py
|
pascal_row
|
rymmx/My_information
|
python
|
def pascal_row(self, n):
" \n "
result = [1]
(x, numerator) = (1, n)
for denominator in range(1, ((n // 2) + 1)):
x *= numerator
x /= denominator
result.append(x)
numerator -= 1
if ((n & 1) == 0):
result.extend(reversed(result[:(- 1)]))
else:
result.extend(reversed(result))
return result
|
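The loop above builds each binomial coefficient from the previous one (x * numerator / denominator) and mirrors the half-row at the end. A standalone check of the same algorithm; note this sketch uses floor division so the coefficients stay ints, whereas the original's /= yields floats under Python 3, which is harmless for the Bezier weights it feeds:

def pascal_row(n):
    result = [1]
    x, numerator = 1, n
    for denominator in range(1, n // 2 + 1):
        x *= numerator
        x //= denominator            # exact: C(n, k) is always an integer here
        result.append(x)
        numerator -= 1
    if n % 2 == 0:
        result.extend(reversed(result[:-1]))   # even n: middle element not doubled
    else:
        result.extend(reversed(result))
    return result

print(pascal_row(4))  # [1, 4, 6, 4, 1]
print(pascal_row(5))  # [1, 5, 10, 10, 5, 1]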
def make_bezier(self, n):
' Bezier curves:\n http://en.wikipedia.org/wiki/B%C3%A9zier_curve#Generalization\n '
try:
return self.beziers[n]
except KeyError:
combinations = self.pascal_row((n - 1))
result = []
for t in self.tsequence:
tpowers = ((t ** i) for i in range(n))
upowers = (((1 - t) ** i) for i in range((n - 1), (- 1), (- 1)))
coefs = [((c * a) * b) for (c, a, b) in zip(combinations, tpowers, upowers)]
result.append(coefs)
self.beziers[n] = result
return result
| 7,316,862,772,145,992,000
|
Bezier curves:
http://en.wikipedia.org/wiki/B%C3%A9zier_curve#Generalization
|
info/utils/captcha/captcha.py
|
make_bezier
|
rymmx/My_information
|
python
|
def make_bezier(self, n):
' Bezier curves:\n http://en.wikipedia.org/wiki/B%C3%A9zier_curve#Generalization\n '
try:
return self.beziers[n]
except KeyError:
combinations = self.pascal_row((n - 1))
result = []
for t in self.tsequence:
tpowers = ((t ** i) for i in range(n))
upowers = (((1 - t) ** i) for i in range((n - 1), (- 1), (- 1)))
coefs = [((c * a) * b) for (c, a, b) in zip(combinations, tpowers, upowers)]
result.append(coefs)
self.beziers[n] = result
return result
|
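The rows returned above are Bernstein basis weights at each t; given control points, a weighted sum of those rows yields curve points. A self-contained sketch of that use, with math.comb standing in for the class's cached pascal_row and a hypothetical three-value tsequence:

import math

def bernstein_rows(n, tsequence):
    combinations = [math.comb(n - 1, k) for k in range(n)]
    rows = []
    for t in tsequence:
        tpowers = [t ** i for i in range(n)]
        upowers = [(1 - t) ** i for i in range(n - 1, -1, -1)]
        rows.append([c * a * b for c, a, b in zip(combinations, tpowers, upowers)])
    return rows

controls = [(0, 0), (50, 100), (100, 0)]          # quadratic Bezier control points
for coefs in bernstein_rows(3, [0.0, 0.5, 1.0]):
    x = sum(c * px for c, (px, _) in zip(coefs, controls))
    y = sum(c * py for c, (_, py) in zip(coefs, controls))
    print((x, y))   # (0.0, 0.0), (50.0, 50.0), (100.0, 0.0)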
def captcha(self, path=None, fmt='JPEG'):
"Create a captcha.\n\n Args:\n path: save path, default None.\n fmt: image format, PNG / JPEG.\n Returns:\n A tuple, (name, text, StringIO.value).\n For example:\n ('EXAMPLE_KEY', 'JGW9', '\x89PNG\r\n\x1a\n\x00\x00\x00\r...')\n\n "
image = Image.new('RGB', (self.width, self.height), (255, 255, 255))
image = self.background(image)
image = self.text(image, self.fonts, drawings=['warp', 'rotate', 'offset'])
image = self.curve(image)
image = self.noise(image)
image = self.smooth(image)
name = ''.join(random.sample(((string.ascii_lowercase + string.ascii_uppercase) + '3456789'), 24))
text = ''.join(self._text)
out = BytesIO()
image.save(out, format=fmt)
if path:
image.save(os.path.join(path, name), fmt)
return (name, text, out.getvalue())
| 5,946,452,396,114,644,000
|
Create a captcha.
Args:
path: save path, default None.
fmt: image format, PNG / JPEG.
Returns:
A tuple, (name, text, BytesIO value).
For example:
('EXAMPLE_KEY', 'JGW9', '\x89PNG\r\n\x1a\n\x00\x00\x00\r...')
|
info/utils/captcha/captcha.py
|
captcha
|
rymmx/My_information
|
python
|
def captcha(self, path=None, fmt='JPEG'):
"Create a captcha.\n\n Args:\n path: save path, default None.\n fmt: image format, PNG / JPEG.\n Returns:\n A tuple, (name, text, StringIO.value).\n For example:\n ('EXAMPLE_KEY', 'JGW9', '\x89PNG\r\n\x1a\n\x00\x00\x00\r...')\n\n "
image = Image.new('RGB', (self.width, self.height), (255, 255, 255))
image = self.background(image)
image = self.text(image, self.fonts, drawings=['warp', 'rotate', 'offset'])
image = self.curve(image)
image = self.noise(image)
image = self.smooth(image)
name = ''.join(random.sample(((string.ascii_lowercase + string.ascii_uppercase) + '3456789'), 24))
text = ''.join(self._text)
out = BytesIO()
image.save(out, format=fmt)
if path:
image.save(os.path.join(path, name), fmt)
return (name, text, out.getvalue())
|
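The in-memory export at the end of the method — render into a BytesIO, then hand back its bytes — works with any PIL image; a self-contained sketch of just that step:

from io import BytesIO
from PIL import Image

image = Image.new('RGB', (120, 40), (255, 255, 255))
out = BytesIO()
image.save(out, format='JPEG')   # encode without touching the filesystem
raw = out.getvalue()             # the bytes returned as the tuple's third element
print(len(raw) > 0, raw[:2] == b'\xff\xd8')  # True True (JPEG SOI marker)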
def parse_requirements(file_):
    "Parse a requirements formatted file.\n\n    Traverse a string until a delimiter is detected, then split at said\n    delimiter, get module name by element index, create a dict consisting of\n    module:version, and add the dict to the list of parsed modules.\n\n    Args:\n        file_: File to parse.\n\n    Raises:\n        OSError: If there are any issues accessing the file.\n\n    Returns:\n        list: The modules contained in the file, excluding comments.\n    "
modules = []
delim = ['<', '>', '=', '!', '~']
try:
f = open_func(file_, 'r')
except OSError:
logging.error('Failed on file: {}'.format(file_))
raise
else:
data = [x.strip() for x in f.readlines() if (x != '\n')]
finally:
f.close()
data = [x for x in data if x[0].isalpha()]
for x in data:
if (not any([(y in x) for y in delim])):
modules.append({'name': x, 'version': None})
for y in x:
if (y in delim):
module = x.split(y)
module_name = module[0]
module_version = module[(- 1)].replace('=', '')
module = {'name': module_name, 'version': module_version}
if (module not in modules):
modules.append(module)
break
return modules
| 8,203,025,767,294,502,000
|
Parse a requirements formatted file.
Traverse a string until a delimiter is detected, then split at said
delimiter, get module name by element index, create a dict consisting of
module:version, and add the dict to the list of parsed modules.
Args:
file_: File to parse.
Raises:
OSError: If there are any issues accessing the file.
Returns:
list: The modules contained in the file, excluding comments.
|
pipenv/vendor/pipreqs/pipreqs.py
|
parse_requirements
|
0mp/pipenv
|
python
|
def parse_requirements(file_):
    "Parse a requirements formatted file.\n\n    Traverse a string until a delimiter is detected, then split at said\n    delimiter, get module name by element index, create a dict consisting of\n    module:version, and add the dict to the list of parsed modules.\n\n    Args:\n        file_: File to parse.\n\n    Raises:\n        OSError: If there are any issues accessing the file.\n\n    Returns:\n        list: The modules contained in the file, excluding comments.\n    "
modules = []
delim = ['<', '>', '=', '!', '~']
try:
f = open_func(file_, 'r')
except OSError:
logging.error('Failed on file: {}'.format(file_))
raise
else:
data = [x.strip() for x in f.readlines() if (x != '\n')]
finally:
f.close()
data = [x for x in data if x[0].isalpha()]
for x in data:
if (not any([(y in x) for y in delim])):
modules.append({'name': x, 'version': None})
for y in x:
if (y in delim):
module = x.split(y)
module_name = module[0]
module_version = module[(- 1)].replace('=', '')
module = {'name': module_name, 'version': module_version}
if (module not in modules):
modules.append(module)
break
return modules
|
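To see the delimiter handling above on concrete input, here is the same core loop extracted and run over a few typical requirement lines (file access and deduplication elided):

delim = ['<', '>', '=', '!', '~']
for line in ['requests>=2.25.1', 'flask', 'numpy==1.21.0']:
    if not any(d in line for d in delim):
        print({'name': line, 'version': None})
        continue
    for ch in line:
        if ch in delim:                     # split at the first delimiter char
            parts = line.split(ch)
            print({'name': parts[0], 'version': parts[-1].replace('=', '')})
            break
# {'name': 'requests', 'version': '2.25.1'}
# {'name': 'flask', 'version': None}
# {'name': 'numpy', 'version': '1.21.0'}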
def compare_modules(file_, imports):
'Compare modules in a file to imported modules in a project.\n\n    Args:\n        file_ (str): File to parse for modules to be compared.\n        imports (tuple): Modules being imported in the project.\n\n    Returns:\n        set: The modules that are not imported in the project but do exist in the\n            specified file.\n    '
modules = parse_requirements(file_)
imports = [imports[i]['name'] for i in range(len(imports))]
modules = [modules[i]['name'] for i in range(len(modules))]
modules_not_imported = (set(modules) - set(imports))
return modules_not_imported
| 6,199,117,424,864,670,000
|
Compare modules in a file to imported modules in a project.
Args:
file_ (str): File to parse for modules to be compared.
imports (tuple): Modules being imported in the project.
Returns:
set: The modules that are not imported in the project but do exist in the
specified file.
|
pipenv/vendor/pipreqs/pipreqs.py
|
compare_modules
|
0mp/pipenv
|
python
|
def compare_modules(file_, imports):
'Compare modules in a file to imported modules in a project.\n\n    Args:\n        file_ (str): File to parse for modules to be compared.\n        imports (tuple): Modules being imported in the project.\n\n    Returns:\n        set: The modules that are not imported in the project but do exist in the\n            specified file.\n    '
modules = parse_requirements(file_)
imports = [imports[i]['name'] for i in range(len(imports))]
modules = [modules[i]['name'] for i in range(len(modules))]
modules_not_imported = (set(modules) - set(imports))
return modules_not_imported
|
def diff(file_, imports):
'Display the difference between modules in a file and imported modules.'
modules_not_imported = compare_modules(file_, imports)
logging.info('The following modules are in {} but do not seem to be imported: {}'.format(file_, ', '.join((x for x in modules_not_imported))))
| -1,095,672,304,813,857,800
|
Display the difference between modules in a file and imported modules.
|
pipenv/vendor/pipreqs/pipreqs.py
|
diff
|
0mp/pipenv
|
python
|
def diff(file_, imports):
modules_not_imported = compare_modules(file_, imports)
logging.info('The following modules are in {} but do not seem to be imported: {}'.format(file_, ', '.join((x for x in modules_not_imported))))
|
def clean(file_, imports):
"Remove modules that aren't imported in project from file."
modules_not_imported = compare_modules(file_, imports)
re_remove = re.compile('|'.join(modules_not_imported))
to_write = []
try:
f = open_func(file_, 'r+')
except OSError:
logging.error('Failed on file: {}'.format(file_))
raise
else:
for i in f.readlines():
if (re_remove.match(i) is None):
to_write.append(i)
f.seek(0)
f.truncate()
for i in to_write:
f.write(i)
finally:
f.close()
logging.info(('Successfully cleaned up requirements in ' + file_))
| -143,540,156,866,477,780
|
Remove modules that aren't imported in project from file.
|
pipenv/vendor/pipreqs/pipreqs.py
|
clean
|
0mp/pipenv
|
python
|
def clean(file_, imports):
modules_not_imported = compare_modules(file_, imports)
re_remove = re.compile('|'.join(modules_not_imported))
to_write = []
try:
f = open_func(file_, 'r+')
except OSError:
logging.error('Failed on file: {}'.format(file_))
raise
else:
for i in f.readlines():
if (re_remove.match(i) is None):
to_write.append(i)
f.seek(0)
f.truncate()
for i in to_write:
f.write(i)
finally:
f.close()
logging.info(('Successfully cleaned up requirements in ' + file_))
|
def dataloader(name):
'\n decorator for registering dataloader functions\n\n Args:\n name: data set name\n\n '
def loader(func):
_dataloaders[name] = func
return func
return loader
| 4,858,664,684,579,101,000
|
decorator for registering dataloader functions
Args:
name: data set name
|
local2global_embedding/run.py
|
dataloader
|
LJeub/Local2Global_embedding
|
python
|
def dataloader(name):
'\n decorator for registering dataloader functions\n\n Args:\n name: data set name\n\n '
def loader(func):
_dataloaders[name] = func
return func
return loader
|
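The decorator above implements a name → loader registry; a self-contained sketch of the same mechanism, with _dataloaders as the module-level dict it assumes and a hypothetical 'Cora' loader:

_dataloaders = {}

def dataloader(name):
    def loader(func):
        _dataloaders[name] = func   # register under the given data set name
        return func                 # return the function unchanged
    return loader

@dataloader('Cora')
def load_cora():
    return 'cora-data'

print(_dataloaders['Cora']())  # cora-data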
def load_data(name):
'\n load data set\n\n Args:\n name: name of data set (one of {names})\n\n Returns:\n largest connected component of data set\n\n '
data = _dataloaders[name]()
data = largest_connected_component(data=data)
data.num_nodes = data.x.shape[0]
return data
| 1,143,256,952,060,514,800
|
load data set
Args:
name: name of data set (one of {names})
Returns:
largest connected component of data set
|
local2global_embedding/run.py
|
load_data
|
LJeub/Local2Global_embedding
|
python
|
def load_data(name):
'\n load data set\n\n Args:\n name: name of data set (one of {names})\n\n Returns:\n largest connected component of data set\n\n '
data = _dataloaders[name]()
data = largest_connected_component(data=data)
data.num_nodes = data.x.shape[0]
return data
|
def prepare_patches(output_folder, **kwargs):
'\n initialise patch data if ``output_folder`` does not exist, else load existing patch data\n\n Args:\n output_folder: folder for storing patch data\n **kwargs: arguments passed to :py:func:`~local2global_embedding.patches.create_patch_data`\n\n Returns:\n patch_data, patch_graph\n '
output_folder = Path(output_folder)
if output_folder.is_dir():
patch_graph = torch.load((output_folder / 'patch_graph.pt'))
patch_data = [torch.load((output_folder / f'patch{i}.pt')) for i in range(patch_graph.num_nodes)]
else:
(patch_data, patch_graph) = create_patch_data(**kwargs)
output_folder.mkdir(parents=True)
torch.save(patch_graph, (output_folder / 'patch_graph.pt'))
for (i, data) in enumerate(patch_data):
torch.save(data, (output_folder / f'patch{i}.pt'))
return (patch_data, patch_graph)
| -6,062,185,444,986,822,000
|
initialise patch data if ``output_folder`` does not exist, else load existing patch data
Args:
output_folder: folder for storing patch data
**kwargs: arguments passed to :py:func:`~local2global_embedding.patches.create_patch_data`
Returns:
patch_data, patch_graph
|
local2global_embedding/run.py
|
prepare_patches
|
LJeub/Local2Global_embedding
|
python
|
def prepare_patches(output_folder, **kwargs):
'\n initialise patch data if ``output_folder`` does not exist, else load existing patch data\n\n Args:\n output_folder: folder for storing patch data\n **kwargs: arguments passed to :py:func:`~local2global_embedding.patches.create_patch_data`\n\n Returns:\n patch_data, patch_graph\n '
output_folder = Path(output_folder)
if output_folder.is_dir():
patch_graph = torch.load((output_folder / 'patch_graph.pt'))
patch_data = [torch.load((output_folder / f'patch{i}.pt')) for i in range(patch_graph.num_nodes)]
else:
(patch_data, patch_graph) = create_patch_data(**kwargs)
output_folder.mkdir(parents=True)
torch.save(patch_graph, (output_folder / 'patch_graph.pt'))
for (i, data) in enumerate(patch_data):
torch.save(data, (output_folder / f'patch{i}.pt'))
return (patch_data, patch_graph)
|
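The folder-existence check above implements compute-once caching: if the output folder exists, reload the artefacts; otherwise compute, create the folder, and save. The same pattern with JSON in place of torch tensors, as a minimal sketch (filenames are illustrative):

import json
from pathlib import Path

def prepare(output_folder, compute):
    output_folder = Path(output_folder)
    cache = output_folder / 'result.json'
    if output_folder.is_dir():
        return json.loads(cache.read_text())   # reuse stored results
    result = compute()
    output_folder.mkdir(parents=True)
    cache.write_text(json.dumps(result))
    return result

print(prepare('demo_cache', lambda: {'patches': 3}))  # computed on first call
print(prepare('demo_cache', lambda: {'patches': 0}))  # loaded: still {'patches': 3}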
def csvlist(input_type=str):
'\n Create an argparse type that parses comma separated lists of type ``input_type``\n\n Args:\n input_type: type of list elements\n\n Returns:\n list parser\n\n '
def make_list(input_str):
return [input_type(s) for s in input_str.split(',')]
make_list.__doc__ = f'''
argparse type that parses comma separated list of type {input_type}
Args:
input_str: string to be parsed
Returns:
list of elements of type {input_type}
'''
return make_list
| -347,569,928,596,149,250
|
Create an argparse type that parses comma separated lists of type ``input_type``
Args:
input_type: type of list elements
Returns:
list parser
|
local2global_embedding/run.py
|
csvlist
|
LJeub/Local2Global_embedding
|
python
|
def csvlist(input_type=str):
'\n Create an argparse type that parses comma separated lists of type ``input_type``\n\n Args:\n input_type: type of list elements\n\n Returns:\n list parser\n\n '
def make_list(input_str):
return [input_type(s) for s in input_str.split(',')]
make_list.__doc__ = f'''
argparse type that parses comma separated list of type {input_type}
Args:
input_str: string to be parsed
Returns:
list of elements of type {input_type}
'''
return make_list
|
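Wired into argparse, the factory above turns a comma-separated option into a typed list; a self-contained sketch (the --dims flag mirrors how run() consumes it):

import argparse

def csvlist(input_type=str):
    def make_list(input_str):
        return [input_type(s) for s in input_str.split(',')]
    return make_list

parser = argparse.ArgumentParser()
parser.add_argument('--dims', type=csvlist(int), default=[2])
args = parser.parse_args(['--dims', '2,4,8'])
print(args.dims)  # [2, 4, 8]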
def run(**kwargs):
"\n Run training example.\n\n By default this function writes results to the current working directory. To override this use the ``output``\n keyword argument.\n\n This function reproduces figure 1(a) of [#l2g]_ if called as ``run(dims=[2**i for i in range(1, 8)], plot=True)``.\n\n\n Keyword Args:\n data: Name of data set to load (one of {``'Cora'``, ``'PubMed'``, ``'AMZ_computers'``, ``'AMZ_photo'``}) (default: ``'Cora'``)\n no_features: If ``True``, discard features and use node identity. (default: ``False``)\n num_epochs: Number of training epochs (default: ``200``)\n runs: Number of training runs (keep best result) (default: ``1``)\n dims: list of embedding dimensions (default: ``[2]``)\n hidden_multiplier: Hidden dimension is ``hidden_multiplier * dim``\n target_patch_degree: Target patch degree for resistance sparsification. (default: ``4``)\n min_overlap: Minimum target patch overlap (default: ``max(dims) + 1``)\n target_overlap: Target patch overlap (default: ``2 * max(dims)``)\n gamma: Value of 'gamma' for RMST sparsification (default: ``0``)\n sparsify: Sparsification method to use (one of {``'resistance'``, ``'none'``, ``'rmst'``})\n (default: ``'resistance'``)\n cluster: Clustering method to use (one of {``'louvain'``, ``'fennel'`` , ``'distributed'``, ``'metis'``})\n (default: ``'metis'``)\n num_clusters: Target number of clusters for distributed, fennel, or metis.\n num_iters: Maximum iterations for distributed or fennel\n lr: Learning rate\n dist: If ``True``, use distance decoder instead of inner product decoder (default: ``False``)\n output: output folder (default: ``'.'``)\n device: Device used for training e.g., 'cpu', 'cuda' (defaults to ``'cuda'`` if available else ``'cpu'``)\n plot: If ``True``, plot embedding performance (default: ``False``)\n verbose: If ``True``, show progress info (default: ``False``)\n\n This function only accepts keyword arguments and is also exposed as a command-line interface.\n\n .. rubric:: References\n\n .. [#l2g] L. G. S. Jeub et al.\n “Local2Global: Scaling global representation learning on graphs via local training”.\n DLG-KDD’21. 2021. `arXiv:2107.12224 [cs.LG] <https://arxiv.org/abs/2107.12224>`_.\n\n "
args = _parser.parse_args([])
for (key, value) in kwargs.items():
if (key in args):
setattr(args, key, value)
else:
raise TypeError(f'Unknown argument {key}')
output_folder = Path(args.output)
data = load_data(args.data)
neg_edges = tg.utils.negative_sampling(data.edge_index, data.num_nodes)
graph = TGraph(data.edge_index, data.edge_attr)
basename = args.data
dims = args.dims
num_epochs = args.num_epochs
runs = args.runs
min_overlap = (args.min_overlap if (args.min_overlap is not None) else (max(dims) + 1))
target_overlap = (args.target_overlap if (args.target_overlap is not None) else (2 * max(dims)))
if args.no_features:
data.x = None
basename += '_no_features'
if args.dist:
basename += '_dist'
if (args.sparsify == 'resistance'):
sp_string = f'resistance_deg{args.target_patch_degree}'
elif (args.sparsify == 'rmst'):
sp_string = f'rmst_gamma{args.gamma}'
elif (args.sparsify == 'none'):
sp_string = 'no_sparsify'
else:
raise RuntimeError(f"Unknown sparsification method '{args.sparsify}'.")
if (args.cluster == 'louvain'):
cluster_fun = (lambda : louvain_clustering(graph))
cluster_string = 'louvain'
elif (args.cluster == 'distributed'):
cluster_fun = (lambda : distributed_clustering(graph, args.beta, rounds=args.num_iters))
cluster_string = f'distributed_beta{args.beta}_it{args.num_iters}'
elif (args.cluster == 'fennel'):
cluster_fun = (lambda : fennel_clustering(graph, num_clusters=args.num_clusters, randomise_order=True, num_iters=args.num_iters))
cluster_string = f'fennel_n{args.num_clusters}_it{args.num_iters}'
elif (args.cluster == 'metis'):
cluster_fun = (lambda : metis_clustering(graph, num_clusters=args.num_clusters))
cluster_string = f'metis_n{args.num_clusters}'
else:
raise RuntimeError(f"Unknown cluster method '{args.cluster}'.")
cluster_file = (output_folder / f'{args.data}_{cluster_string}_clusters.pt')
if cluster_file.is_file():
clusters = torch.load(cluster_file)
else:
clusters = cluster_fun()
torch.save(clusters, cluster_file)
patch_folder = (output_folder / f'{args.data}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}_patches')
(patch_data, patch_graph) = prepare_patches(output_folder=patch_folder, data=data, partition_tensor=clusters, min_overlap=min_overlap, target_overlap=target_overlap, sparsify_method=args.sparsify, gamma=args.gamma, target_patch_degree=args.target_patch_degree, verbose=args.verbose)
if args.verbose:
print(f'total edges: {data.num_edges}')
print(f'total patch edges: {sum((c.num_edges for c in patch_data))}')
if args.no_features:
data.x = speye(data.num_nodes)
baseline_file = (output_folder / f'{basename}_full_info.json')
training_args = {'lr': args.lr, 'num_epochs': args.num_epochs, 'hidden_multiplier': args.hidden_multiplier}
if baseline_file.is_file():
baseline_data = ResultsDict.load(baseline_file)
else:
baseline_data = ResultsDict()
for d in dims:
r = baseline_data.runs(d)
if (r < runs):
if args.verbose:
print(f'training full model for {(runs - r)} runs and d={d}')
for r_it in range(r, runs):
if args.verbose:
print(f'full model (d={d}) run {(r_it + 1)} of {runs}')
data = data.to(args.device)
model = train(data, VGAE_model(d, (d * args.hidden_multiplier), data.num_features, dist=args.dist).to(args.device), loss_fun=VGAE_loss, num_epochs=num_epochs, lr=args.lr, verbose=args.verbose)
coords = embedding(model, data)
auc = reconstruction_auc(coords, data, dist=args.dist)
if (auc > baseline_data.max_auc(d)):
if args.verbose:
print(f'new best (auc={auc})')
torch.save(model.state_dict(), (output_folder / f'{basename}_full_d{d}_best_model.pt'))
torch.save(coords, (output_folder / f'{basename}_full_d{d}_best_coords.pt'))
baseline_data.update_dim(d, [auc], training_args)
baseline_data.save(baseline_file)
results_file = (patch_folder / f'{basename}_l2g_info.json')
nt_results_file = (patch_folder / f'{basename}_nt_info.json')
if results_file.is_file():
results = ResultsDict.load(results_file, replace=True)
else:
results = ResultsDict(replace=True)
if nt_results_file.is_file():
nt_results = ResultsDict.load(nt_results_file, replace=True)
else:
nt_results = ResultsDict(replace=True)
for d in dims:
patch_list = []
update_aligned_embedding = False
for (p_ind, patch) in enumerate(patch_data):
patch_result_file = (patch_folder / f'{basename}_patch{p_ind}_info.json')
if patch_result_file.is_file():
patch_results = ResultsDict.load(patch_result_file)
else:
patch_results = ResultsDict()
coords_file = (patch_folder / f'{basename}_patch{p_ind}_d{d}_best_coords.pt')
if coords_file.is_file():
best_coords = torch.load(coords_file)
r = patch_results.runs(d)
if args.no_features:
patch.x = speye(patch.num_nodes)
if (r < runs):
if args.verbose:
print(f'training patch{p_ind} for {(runs - r)} runs and d={d}')
patch = patch.to(args.device)
for r_it in range(r, runs):
if args.verbose:
print(f'patch{p_ind} (d={d}) run {(r_it + 1)} of {runs}')
model = train(patch, VGAE_model(d, (d * args.hidden_multiplier), patch.num_features, dist=args.dist).to(args.device), loss_fun=VGAE_loss, num_epochs=num_epochs, lr=args.lr)
coords = embedding(model, patch)
auc = reconstruction_auc(coords, patch, dist=args.dist)
if (auc > patch_results.max_auc(d)):
if args.verbose:
print(f'new best (auc={auc})')
best_coords = coords
torch.save(model.state_dict(), (patch_folder / f'{basename}_patch{p_ind}_d{d}_best_model.pt'))
torch.save(best_coords, coords_file)
update_aligned_embedding = True
patch_results.update_dim(d, [auc], training_args)
patch_results.save(patch_result_file)
patch_list.append(l2g.Patch(patch.nodes.cpu().numpy(), best_coords.cpu().numpy()))
patched_embedding_file = (patch_folder / f'{basename}_d{d}_coords.pt')
patched_embedding_file_nt = (patch_folder / f'{basename}_d{d}_ntcoords.pt')
if (update_aligned_embedding or (not patched_embedding_file.is_file())):
prob = l2g.WeightedAlignmentProblem(patch_list, patch_edges=patch_graph.edges())
ntcoords = prob.mean_embedding()
coords = prob.get_aligned_embedding()
torch.save(coords, patched_embedding_file)
torch.save(ntcoords, patched_embedding_file_nt)
results.update_dim(d, [reconstruction_auc(torch.as_tensor(coords), data, neg_edges, dist=args.dist)])
nt_results.update_dim(d, [reconstruction_auc(torch.as_tensor(ntcoords), data, neg_edges, dist=args.dist)])
results.save(results_file)
nt_results.save(nt_results_file)
baseline_data = baseline_data.reduce_to_dims(dims)
results = results.reduce_to_dims(dims)
nt_results = nt_results.reduce_to_dims(dims)
if args.plot:
plt.figure()
plt.plot(dims, [max(v) for v in baseline_data['auc']], label='full, inner product', marker='o', color='tab:blue')
plt.plot(dims, results['auc'], '--', label='l2g, inner product', marker='>', color='tab:blue')
plt.plot(dims, nt_results['auc'], ':', label='no-trans, inner product', color='tab:blue', linewidth=1)
plt.xscale('log')
plt.xticks(dims, dims)
plt.minorticks_off()
plt.xlabel('embedding dimension')
plt.ylabel('AUC')
plt.legend()
oversampling_ratio = (sum((p.num_edges for p in patch_data)) / data.num_edges)
plt.title(f'oversampling ratio: {oversampling_ratio:.2}, #patches: {len(patch_data)}')
plt.savefig((output_folder / f'{basename}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}.pdf'))
plt.show()
| -867,494,552,452,990,000
|
Run training example.
By default this function writes results to the current working directory. To override this use the ``output``
keyword argument.
This function reproduces figure 1(a) of [#l2g]_ if called as ``run(dims=[2**i for i in range(1, 8)], plot=True)``.
Keyword Args:
data: Name of data set to load (one of {``'Cora'``, ``'PubMed'``, ``'AMZ_computers'``, ``'AMZ_photo'``}) (default: ``'Cora'``)
no_features: If ``True``, discard features and use node identity. (default: ``False``)
num_epochs: Number of training epochs (default: ``200``)
runs: Number of training runs (keep best result) (default: ``1``)
dims: list of embedding dimensions (default: ``[2]``)
hidden_multiplier: Hidden dimension is ``hidden_multiplier * dim``
target_patch_degree: Target patch degree for resistance sparsification. (default: ``4``)
min_overlap: Minimum target patch overlap (default: ``max(dims) + 1``)
target_overlap: Target patch overlap (default: ``2 * max(dims)``)
gamma: Value of 'gamma' for RMST sparsification (default: ``0``)
sparsify: Sparsification method to use (one of {``'resistance'``, ``'none'``, ``'rmst'``})
(default: ``'resistance'``)
cluster: Clustering method to use (one of {``'louvain'``, ``'fennel'`` , ``'distributed'``, ``'metis'``})
(default: ``'metis'``)
num_clusters: Target number of clusters for distributed, fennel, or metis.
num_iters: Maximum iterations for distributed or fennel
lr: Learning rate
dist: If ``True``, use distance decoder instead of inner product decoder (default: ``False``)
output: output folder (default: ``'.'``)
device: Device used for training e.g., 'cpu', 'cuda' (defaults to ``'cuda'`` if available else ``'cpu'``)
plot: If ``True``, plot embedding performance (default: ``False``)
verbose: If ``True``, show progress info (default: ``False``)
This function only accepts keyword arguments and is also exposed as a command-line interface.
.. rubric:: References
.. [#l2g] L. G. S. Jeub et al.
“Local2Global: Scaling global representation learning on graphs via local training”.
DLG-KDD’21. 2021. `arXiv:2107.12224 [cs.LG] <https://arxiv.org/abs/2107.12224>`_.
|
local2global_embedding/run.py
|
run
|
LJeub/Local2Global_embedding
|
python
|
def run(**kwargs):
"\n Run training example.\n\n By default this function writes results to the current working directory. To override this use the ``output``\n keyword argument.\n\n This function reproduces figure 1(a) of [#l2g]_ if called as ``run(dims=[2**i for i in range(1, 8)], plot=True)``.\n\n\n Keyword Args:\n data: Name of data set to load (one of {``'Cora'``, ``'PubMed'``, ``'AMZ_computers'``, ``'AMZ_photo'``}) (default: ``'Cora'``)\n no_features: If ``True``, discard features and use node identity. (default: ``False``)\n num_epochs: Number of training epochs (default: ``200``)\n runs: Number of training runs (keep best result) (default: ``1``)\n dims: list of embedding dimensions (default: ``[2]``)\n hidden_multiplier: Hidden dimension is ``hidden_multiplier * dim``\n target_patch_degree: Target patch degree for resistance sparsification. (default: ``4``)\n min_overlap: Minimum target patch overlap (default: ``max(dims) + 1``)\n target_overlap: Target patch overlap (default: ``2 * max(dims)``)\n gamma: Value of 'gamma' for RMST sparsification (default: ``0``)\n sparsify: Sparsification method to use (one of {``'resistance'``, ``'none'``, ``'rmst'``})\n (default: ``'resistance'``)\n cluster: Clustering method to use (one of {``'louvain'``, ``'fennel'`` , ``'distributed'``, ``'metis'``})\n (default: ``'metis'``)\n num_clusters: Target number of clusters for distributed, fennel, or metis.\n num_iters: Maximum iterations for distributed or fennel\n lr: Learning rate\n dist: If ``True``, use distance decoder instead of inner product decoder (default: ``False``)\n output: output folder (default: ``'.'``)\n device: Device used for training e.g., 'cpu', 'cuda' (defaults to ``'cuda'`` if available else ``'cpu'``)\n plot: If ``True``, plot embedding performance (default: ``False``)\n verbose: If ``True``, show progress info (default: ``False``)\n\n This function only accepts keyword arguments and is also exposed as a command-line interface.\n\n .. rubric:: References\n\n .. [#l2g] L. G. S. Jeub et al.\n “Local2Global: Scaling global representation learning on graphs via local training”.\n DLG-KDD’21. 2021. `arXiv:2107.12224 [cs.LG] <https://arxiv.org/abs/2107.12224>`_.\n\n "
args = _parser.parse_args([])
for (key, value) in kwargs.items():
if (key in args):
setattr(args, key, value)
else:
raise TypeError(f'Unknown argument {key}')
output_folder = Path(args.output)
data = load_data(args.data)
neg_edges = tg.utils.negative_sampling(data.edge_index, data.num_nodes)
graph = TGraph(data.edge_index, data.edge_attr)
basename = args.data
dims = args.dims
num_epochs = args.num_epochs
runs = args.runs
min_overlap = (args.min_overlap if (args.min_overlap is not None) else (max(dims) + 1))
target_overlap = (args.target_overlap if (args.target_overlap is not None) else (2 * max(dims)))
if args.no_features:
data.x = None
basename += '_no_features'
if args.dist:
basename += '_dist'
if (args.sparsify == 'resistance'):
sp_string = f'resistance_deg{args.target_patch_degree}'
elif (args.sparsify == 'rmst'):
sp_string = f'rmst_gamma{args.gamma}'
elif (args.sparsify == 'none'):
sp_string = 'no_sparsify'
else:
raise RuntimeError(f"Unknown sparsification method '{args.sparsify}'.")
if (args.cluster == 'louvain'):
cluster_fun = (lambda : louvain_clustering(graph))
cluster_string = 'louvain'
elif (args.cluster == 'distributed'):
cluster_fun = (lambda : distributed_clustering(graph, args.beta, rounds=args.num_iters))
cluster_string = f'distributed_beta{args.beta}_it{args.num_iters}'
elif (args.cluster == 'fennel'):
cluster_fun = (lambda : fennel_clustering(graph, num_clusters=args.num_clusters, randomise_order=True, num_iters=args.num_iters))
cluster_string = f'fennel_n{args.num_clusters}_it{args.num_iters}'
elif (args.cluster == 'metis'):
cluster_fun = (lambda : metis_clustering(graph, num_clusters=args.num_clusters))
cluster_string = f'metis_n{args.num_clusters}'
else:
raise RuntimeError(f"Unknown cluster method '{args.cluster}'.")
cluster_file = (output_folder / f'{args.data}_{cluster_string}_clusters.pt')
if cluster_file.is_file():
clusters = torch.load(cluster_file)
else:
clusters = cluster_fun()
torch.save(clusters, cluster_file)
patch_folder = (output_folder / f'{args.data}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}_patches')
(patch_data, patch_graph) = prepare_patches(output_folder=patch_folder, data=data, partition_tensor=clusters, min_overlap=min_overlap, target_overlap=target_overlap, sparsify_method=args.sparsify, gamma=args.gamma, target_patch_degree=args.target_patch_degree, verbose=args.verbose)
if args.verbose:
print(f'total edges: {data.num_edges}')
print(f'total patch edges: {sum((c.num_edges for c in patch_data))}')
if args.no_features:
data.x = speye(data.num_nodes)
baseline_file = (output_folder / f'{basename}_full_info.json')
training_args = {'lr': args.lr, 'num_epochs': args.num_epochs, 'hidden_multiplier': args.hidden_multiplier}
if baseline_file.is_file():
baseline_data = ResultsDict.load(baseline_file)
else:
baseline_data = ResultsDict()
for d in dims:
r = baseline_data.runs(d)
if (r < runs):
if args.verbose:
print(f'training full model for {(runs - r)} runs and d={d}')
for r_it in range(r, runs):
if args.verbose:
print(f'full model (d={d}) run {(r_it + 1)} of {runs}')
data = data.to(args.device)
model = train(data, VGAE_model(d, (d * args.hidden_multiplier), data.num_features, dist=args.dist).to(args.device), loss_fun=VGAE_loss, num_epochs=num_epochs, lr=args.lr, verbose=args.verbose)
coords = embedding(model, data)
auc = reconstruction_auc(coords, data, dist=args.dist)
if (auc > baseline_data.max_auc(d)):
if args.verbose:
print(f'new best (auc={auc})')
torch.save(model.state_dict(), (output_folder / f'{basename}_full_d{d}_best_model.pt'))
torch.save(coords, (output_folder / f'{basename}_full_d{d}_best_coords.pt'))
baseline_data.update_dim(d, [auc], training_args)
baseline_data.save(baseline_file)
results_file = (patch_folder / f'{basename}_l2g_info.json')
nt_results_file = (patch_folder / f'{basename}_nt_info.json')
if results_file.is_file():
results = ResultsDict.load(results_file, replace=True)
else:
results = ResultsDict(replace=True)
if nt_results_file.is_file():
nt_results = ResultsDict.load(nt_results_file, replace=True)
else:
nt_results = ResultsDict(replace=True)
for d in dims:
patch_list = []
update_aligned_embedding = False
for (p_ind, patch) in enumerate(patch_data):
patch_result_file = (patch_folder / f'{basename}_patch{p_ind}_info.json')
if patch_result_file.is_file():
patch_results = ResultsDict.load(patch_result_file)
else:
patch_results = ResultsDict()
coords_file = (patch_folder / f'{basename}_patch{p_ind}_d{d}_best_coords.pt')
if coords_file.is_file():
best_coords = torch.load(coords_file)
r = patch_results.runs(d)
if args.no_features:
patch.x = speye(patch.num_nodes)
if (r < runs):
if args.verbose:
print(f'training patch{p_ind} for {(runs - r)} runs and d={d}')
patch = patch.to(args.device)
for r_it in range(r, runs):
if args.verbose:
print(f'patch{p_ind} (d={d}) run {(r_it + 1)} of {runs}')
model = train(patch, VGAE_model(d, (d * args.hidden_multiplier), patch.num_features, dist=args.dist).to(args.device), loss_fun=VGAE_loss, num_epochs=num_epochs, lr=args.lr)
coords = embedding(model, patch)
auc = reconstruction_auc(coords, patch, dist=args.dist)
if (auc > patch_results.max_auc(d)):
if args.verbose:
print(f'new best (auc={auc})')
best_coords = coords
torch.save(model.state_dict(), (patch_folder / f'{basename}_patch{p_ind}_d{d}_best_model.pt'))
torch.save(best_coords, coords_file)
update_aligned_embedding = True
patch_results.update_dim(d, [auc], training_args)
patch_results.save(patch_result_file)
patch_list.append(l2g.Patch(patch.nodes.cpu().numpy(), best_coords.cpu().numpy()))
patched_embedding_file = (patch_folder / f'{basename}_d{d}_coords.pt')
patched_embedding_file_nt = (patch_folder / f'{basename}_d{d}_ntcoords.pt')
if (update_aligned_embedding or (not patched_embedding_file.is_file())):
prob = l2g.WeightedAlignmentProblem(patch_list, patch_edges=patch_graph.edges())
ntcoords = prob.mean_embedding()
coords = prob.get_aligned_embedding()
torch.save(coords, patched_embedding_file)
torch.save(ntcoords, patched_embedding_file_nt)
results.update_dim(d, [reconstruction_auc(torch.as_tensor(coords), data, neg_edges, dist=args.dist)])
nt_results.update_dim(d, [reconstruction_auc(torch.as_tensor(ntcoords), data, neg_edges, dist=args.dist)])
results.save(results_file)
nt_results.save(nt_results_file)
baseline_data = baseline_data.reduce_to_dims(dims)
results = results.reduce_to_dims(dims)
nt_results = nt_results.reduce_to_dims(dims)
if args.plot:
plt.figure()
plt.plot(dims, [max(v) for v in baseline_data['auc']], label='full, inner product', marker='o', color='tab:blue')
plt.plot(dims, results['auc'], '--', label='l2g, inner product', marker='>', color='tab:blue')
plt.plot(dims, nt_results['auc'], ':', label='no-trans, inner product', color='tab:blue', linewidth=1)
plt.xscale('log')
plt.xticks(dims, dims)
plt.minorticks_off()
plt.xlabel('embedding dimension')
plt.ylabel('AUC')
plt.legend()
oversampling_ratio = (sum((p.num_edges for p in patch_data)) / data.num_edges)
plt.title(f'oversampling ratio: {oversampling_ratio:.2}, #patches: {len(patch_data)}')
plt.savefig((output_folder / f'{basename}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}.pdf'))
plt.show()
|
@classmethod
def load(cls, filename, replace=False):
'\n restore results from file\n\n Args:\n filename: input json file\n replace: set the replace attribute\n\n Returns:\n populated ResultsDict\n\n '
self = cls(replace=replace)
with open(filename) as f:
self._data.update(json.load(f))
return self
| -141,132,625,160,646,660
|
restore results from file
Args:
filename: input json file
replace: set the replace attribute
Returns:
populated ResultsDict
|
local2global_embedding/run.py
|
load
|
LJeub/Local2Global_embedding
|
python
|
@classmethod
def load(cls, filename, replace=False):
'\n restore results from file\n\n Args:\n filename: input json file\n replace: set the replace attribute\n\n Returns:\n populated ResultsDict\n\n '
self = cls(replace=replace)
with open(filename) as f:
self._data.update(json.load(f))
return self
|
def save(self, filename):
'\n dump contents to json file\n\n Args:\n filename: output file path\n\n '
with open(filename, 'w') as f:
json.dump(self._data, f)
| -7,717,427,815,994,800,000
|
dump contents to json file
Args:
filename: output file path
|
local2global_embedding/run.py
|
save
|
LJeub/Local2Global_embedding
|
python
|
def save(self, filename):
'\n dump contents to json file\n\n Args:\n filename: output file path\n\n '
with open(filename, 'w') as f:
json.dump(self._data, f)
|
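Taken together, load and save give ResultsDict a simple JSON round-trip. A minimal usage sketch; the filename and the values are illustrative, not from the source:

results = ResultsDict(replace=False)
results.update_dim(32, [0.91, 0.93], args={'lr': 0.001})  # two runs recorded for d=32
results.save('results.json')
restored = ResultsDict.load('results.json')
assert restored.runs(32) == 2
assert restored.max_auc(32) == 0.93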
def __init__(self, replace=False):
'\n initialise empty ResultsDict\n Args:\n replace: set the replace attribute (default: ``False``)\n '
self._data = {'dims': [], 'auc': [], 'args': []}
self.replace = replace
| 7,425,415,623,795,663,000
|
initialise empty ResultsDict
Args:
replace: set the replace attribute (default: ``False``)
|
local2global_embedding/run.py
|
__init__
|
LJeub/Local2Global_embedding
|
python
|
def __init__(self, replace=False):
'\n initialise empty ResultsDict\n Args:\n replace: set the replace attribute (default: ``False``)\n '
self._data = {'dims': [], 'auc': [], 'args': []}
self.replace = replace
|
def _update_index(self, index, aucs: list, args=None):
'\n update data for a given index\n\n Args:\n index: integer index into data lists\n aucs: new auc values (should be a list)\n args: new args data (optional)\n\n '
if self.replace:
self['auc'][index] = aucs
self['args'][index] = args
else:
self['auc'][index].extend(aucs)
self['args'][index].extend(([args] * len(aucs)))
| 2,771,856,164,189,648,400
|
update data for a given index
Args:
index: integer index into data lists
aucs: new auc values (should be a list)
args: new args data (optional)
|
local2global_embedding/run.py
|
_update_index
|
LJeub/Local2Global_embedding
|
python
|
def _update_index(self, index, aucs: list, args=None):
'\n update data for a given index\n\n Args:\n index: integer index into data lists\n aucs: new auc values (should be a list)\n args: new args data (optional)\n\n '
if self.replace:
self['auc'][index] = aucs
self['args'][index] = args
else:
self['auc'][index].extend(aucs)
self['args'][index].extend(([args] * len(aucs)))
|
def _insert_index(self, index: int, dim: int, aucs: list, args=None):
'\n insert new data at index\n\n Args:\n index: integer index into data lists\n dim: data dimension for index\n aucs: new auc values\n args: new args data (optional)\n '
self['auc'].insert(index, aucs)
self['dims'].insert(index, dim)
self['args'].insert(index, ([args] * len(aucs)))
| -2,603,589,553,267,202,000
|
insert new data at index
Args:
index: integer index into data lists
dim: data dimension for index
aucs: new auc values
args: new args data (optional)
|
local2global_embedding/run.py
|
_insert_index
|
LJeub/Local2Global_embedding
|
python
|
def _insert_index(self, index: int, dim: int, aucs: list, args=None):
'\n insert new data at index\n\n Args:\n index: integer index into data lists\n dim: data dimension for index\n aucs: new auc values\n args: new args data (optional)\n '
self['auc'].insert(index, aucs)
self['dims'].insert(index, dim)
self['args'].insert(index, ([args] * len(aucs)))
|
def update_dim(self, dim, aucs, args=None):
'\n update data for given dimension\n\n Args:\n dim: dimension to update\n aucs: new auc values\n args: new args data (optional)\n\n if ``self.contains_dim(dim) == True``, behaviour depends on the value of\n ``self.replace``\n\n '
index = bisect_left(self['dims'], dim)
if ((index < len(self['dims'])) and (self['dims'][index] == dim)):
self._update_index(index, aucs, args)
else:
self._insert_index(index, dim, aucs, args)
| 4,456,085,917,307,313,700
|
update data for given dimension
Args:
dim: dimension to update
aucs: new auc values
args: new args data (optional)
if ``self.contains_dim(dim) == True``, behaviour depends on the value of
``self.replace``
|
local2global_embedding/run.py
|
update_dim
|
LJeub/Local2Global_embedding
|
python
|
def update_dim(self, dim, aucs, args=None):
'\n update data for given dimension\n\n Args:\n dim: dimension to update\n aucs: new auc values\n args: new args data (optional)\n\n if ``self.contains_dim(dim) == True``, behaviour depends on the value of\n ``self.replace``\n\n '
index = bisect_left(self['dims'], dim)
if ((index < len(self['dims'])) and (self['dims'][index] == dim)):
self._update_index(index, aucs, args)
else:
self._insert_index(index, dim, aucs, args)
|
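The replace flag set in the constructor decides whether update_dim overwrites or accumulates values for a dimension that is already present. A small sketch of both behaviours, assuming the dict-style item access the methods above rely on:

# replace=False (default): values for an existing dimension accumulate.
acc = ResultsDict()
acc.update_dim(16, [0.80])
acc.update_dim(16, [0.85])
assert acc.runs(16) == 2   # stored aucs: [0.80, 0.85]

# replace=True: a later update overwrites the stored values for that dimension.
rep = ResultsDict(replace=True)
rep.update_dim(16, [0.80])
rep.update_dim(16, [0.85])
assert rep.runs(16) == 1   # stored aucs: [0.85]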
def max_auc(self, dim=None):
'\n return maximum auc values\n\n Args:\n dim: if ``dim=None``, return list of values for all dimensions, else only return maximum value for ``dim``.\n\n '
if (dim is None):
return [max(aucs) for aucs in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if ((index < len(self['dims'])) and (self['dims'][index] == dim)):
return max(self['auc'][index])
else:
return 0.0
| -4,541,015,127,444,244,500
|
return maximum auc values
Args:
dim: if ``dim=None``, return list of values for all dimensions, else only return maximum value for ``dim``.
|
local2global_embedding/run.py
|
max_auc
|
LJeub/Local2Global_embedding
|
python
|
def max_auc(self, dim=None):
'\n return maximum auc values\n\n Args:\n dim: if ``dim=None``, return list of values for all dimensions, else only return maximum value for ``dim``.\n\n '
if (dim is None):
return [max(aucs) for aucs in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if ((index < len(self['dims'])) and (self['dims'][index] == dim)):
return max(self['auc'][index])
else:
return 0.0
|
def contains_dim(self, dim):
"\n equivalent to ``dim in self['dims']``\n\n "
index = bisect_left(self['dims'], dim)
return ((index < len(self['dims'])) and (self['dims'][index] == dim))
| 4,224,711,043,312,171,000
|
equivalent to ``dim in self['dims']``
|
local2global_embedding/run.py
|
contains_dim
|
LJeub/Local2Global_embedding
|
python
|
def contains_dim(self, dim):
"\n \n\n "
index = bisect_left(self['dims'], dim)
return ((index < len(self['dims'])) and (self['dims'][index] == dim))
|
def reduce_to_dims(self, dims):
'\n remove all data for dimensions not in ``dims``\n Args:\n dims: list of dimensions to keep\n\n '
index = [i for (i, d) in enumerate(dims) if self.contains_dim(d)]
for key1 in self._data:
if isinstance(self._data[key1], list):
self._data[key1] = [self[key1][i] for i in index]
return self
| 7,847,196,897,316,981,000
|
remove all data for dimensions not in ``dims``
Args:
dims: list of dimensions to keep
|
local2global_embedding/run.py
|
reduce_to_dims
|
LJeub/Local2Global_embedding
|
python
|
def reduce_to_dims(self, dims):
'\n remove all data for dimensions not in ``dims``\n Args:\n dims: list of dimensions to keep\n\n '
index = [i for (i, d) in enumerate(dims) if self.contains_dim(d)]
for key1 in self._data:
if isinstance(self._data[key1], list):
self._data[key1] = [self[key1][i] for i in index]
return self
|
def runs(self, dim=None):
'\n return the number of runs\n\n Args:\n dim: if ``dim is None``, return list of number of runs for all dimensions, else return number of\n runs for dimension ``dim``.\n\n '
if (dim is None):
return [len(x) for x in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if ((index < len(self['dims'])) and (self['dims'][index] == dim)):
return len(self['auc'][index])
else:
return 0
| 9,131,347,349,148,236,000
|
return the number of runs
Args:
dim: if ``dim is None``, return list of number of runs for all dimensions, else return number of
runs for dimension ``dim``.
|
local2global_embedding/run.py
|
runs
|
LJeub/Local2Global_embedding
|
python
|
def runs(self, dim=None):
'\n return the number of runs\n\n Args:\n dim: if ``dim is None``, return list of number of runs for all dimensions, else return number of\n runs for dimension ``dim``.\n\n '
if (dim is None):
return [len(x) for x in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if ((index < len(self['dims'])) and (self['dims'][index] == dim)):
return len(self['auc'][index])
else:
return 0
|
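runs() is what the driver code above uses to decide how many more training runs a dimension still needs, and max_auc() tells it whether a fresh run beat the stored best. A condensed sketch of that pattern; train_and_score is a hypothetical helper standing in for the train/embedding/reconstruction_auc calls:

target_runs = 10
for d in (2, 4, 8):
    for _ in range(results.runs(d), target_runs):
        auc = train_and_score(d)                 # hypothetical training helper
        new_best = auc > results.max_auc(d)      # if True, checkpoint the model
        results.update_dim(d, [auc])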
def merge_func(op1, op2):
'Artificial example where a CZ will absorb any merge-able operation.'
for op in [op1, op2]:
if (op.gate == cirq.CZ):
return op
return None
| -3,623,384,611,022,538,000
|
Artificial example where a CZ will absorb any merge-able operation.
|
cirq-core/cirq/transformers/transformer_primitives_test.py
|
merge_func
|
TripleRD/Cirq
|
python
|
def merge_func(op1, op2):
for op in [op1, op2]:
if (op.gate == cirq.CZ):
return op
return None
|
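This merge_func is written for cirq's merge_operations transformer primitive, which repeatedly calls it on pairs of mergeable operations and replaces the pair with whatever it returns (None means "do not merge"). A short sketch, assuming a cirq version that exposes cirq.merge_operations at the top level:

import cirq

q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.X(q0), cirq.CZ(q0, q1), cirq.Z(q1))
# Every operation mergeable with the CZ gets absorbed into it,
# leaving a circuit containing just the CZ.
merged = cirq.merge_operations(circuit, merge_func)
print(merged)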
@abstractmethod
def positioned(self, aEvent: 'EventObject_a3d70b03') -> None:
'\n is invoked when the database form has been positioned on a data record.\n '
| 3,457,554,679,496,431,000
|
is invoked when the database form has been positioned on a data record.
|
ooobuild/lo/form/x_positioning_listener.py
|
positioned
|
Amourspirit/ooo_uno_tmpl
|
python
|
@abstractmethod
def positioned(self, aEvent: 'EventObject_a3d70b03') -> None:
'\n \n '
|
def plot_cross_section(self):
' Plot the raw imported nist data '
plt.plot(self.cross_section_x, self.cross_section_y)
plt.title('Cross Section')
plt.xlabel('Angle')
plt.show()
| 742,893,952,309,889,700
|
Plot the raw imported nist data
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
plot_cross_section
|
surfaceanalytics/inelasticscattering
|
python
|
def plot_cross_section(self):
' '
plt.plot(self.cross_section_x, self.cross_section_y)
plt.title('Cross Section')
plt.xlabel('Angle')
plt.show()
|
def load_nist_cross_section(self, filename):
' Load nist data file of differential elastic scattering profile.\n Input:\n filename: filename of csv data from nist database\n Returns:\n cross_section_y: given cross section in range -90 to 90 deg '
filepath = ((os.path.dirname(os.path.abspath(__file__)).partition('controller')[0] + '\\data\\NIST cross sections\\') + filename)
data = np.genfromtxt(filepath, skip_header=10, delimiter=',')
self.cross_section_y = self._convert_nist_data(data)
self.cross_section_x = np.arange((- 90), 90, 1)
return self.cross_section_y
| -3,489,505,185,187,247,000
|
Load nist data file of differential elastic scattering profile.
Input:
filename: filename of csv data from nist database
Returns:
cross_section_y: given cross section in range -90 to 90 deg
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
load_nist_cross_section
|
surfaceanalytics/inelasticscattering
|
python
|
def load_nist_cross_section(self, filename):
' Load nist data file of differential elastic scattering profile.\n Input:\n filename: filename of csv data from nist database\n Returns:\n cross_section_y: given cross section in range -90 to 90 deg '
filepath = ((os.path.dirname(os.path.abspath(__file__)).partition('controller')[0] + '\\data\\NIST cross sections\\') + filename)
data = np.genfromtxt(filepath, skip_header=10, delimiter=',')
self.cross_section_y = self._convert_nist_data(data)
self.cross_section_x = np.arange((- 90), 90, 1)
return self.cross_section_y
|
def plot_nist(self):
' Plot the raw imported nist data '
plt.plot(self.cross_section_x, self.cross_section_y)
plt.title('NIST Data')
plt.xlabel('Angle')
plt.show()
| -2,960,817,490,565,703,700
|
Plot the raw imported nist data
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
plot_nist
|
surfaceanalytics/inelasticscattering
|
python
|
def plot_nist(self):
' '
plt.plot(self.cross_section_x, self.cross_section_y)
plt.title('NIST Data')
plt.xlabel('Angle')
plt.show()
|
def run_convolution(self):
' Run convolution between the nist cross section and a sine curve\n representing initial scattering distribution.\n Returns:\n centered_data: angular distribution spread after each scattering\n event\n '
self.cross_section_y_norm = (self.cross_section_y / np.sum(self.cross_section_y))
self.emitted_elctn_y = self._gen_electron_dist()
self.emitted_elctn_x = np.arange((- 90), 90, 1)
convolved_data = self._convolution(self.cross_section_y_norm, self.emitted_elctn_y, self.iterations)
self.centered_data = self._centre_data(convolved_data)
return self.centered_data
| 3,539,386,310,495,746,000
|
Run convolution between the nist cross section and a sine curve
representing initial scattering distribution.
Returns:
centered_data: angular distribution spread after each scattering
event
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
run_convolution
|
surfaceanalytics/inelasticscattering
|
python
|
def run_convolution(self):
' Run convolution between the nist cross section and a sine curve\n representing initial scattering distribution.\n Returns:\n centered_data: angular distribution spread after each scattering\n event\n '
self.cross_section_y_norm = (self.cross_section_y / np.sum(self.cross_section_y))
self.emitted_elctn_y = self._gen_electron_dist()
self.emitted_elctn_x = np.arange((- 90), 90, 1)
convolved_data = self._convolution(self.cross_section_y_norm, self.emitted_elctn_y, self.iterations)
self.centered_data = self._centre_data(convolved_data)
return self.centered_data
|
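These legacy methods are meant to run in sequence on one object: import the NIST profile, convolve, restrict to the analyser's acceptance angle, then integrate (the remaining steps appear in the rows below). A sketch of that pipeline; the class name AngularSpreadLorentzian, the constructor, the attribute assignments and the filename are assumptions, since only the methods appear in this excerpt:

model = AngularSpreadLorentzian()               # hypothetical class name/constructor
model.iterations = 50                           # attributes the methods rely on
model.acceptance_angle = 30
model.load_nist_cross_section('He_1000eV.csv')  # illustrative filename
model.run_convolution()
model.limit_by_acceptance_angle()
intensities = model.calc_area_under_curve()
ratios = model.calc_area_ratio()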
def plot_convolution_results(self):
' Plot convolution result to show angular distribution spread after\n each scattering event.'
for n in [0, 1, 2, 5, 10, 20, 50]:
plt.plot(self.emitted_elctn_x, self.centered_data[n], label=str(n))
plt.xticks([(- 90), (- 60), (- 30), 0, 30, 60, 90])
plt.xlabel('theta (degrees)')
plt.ylabel('Intensity (a.u.)')
plt.title('Angular distribution per scattering event')
plt.legend(title='No. of iterations', loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
| 820,424,270,423,335,800
|
Plot convolution result to show angular distribution spread after
each scattering event.
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
plot_convolution_results
|
surfaceanalytics/inelasticscattering
|
python
|
def plot_convolution_results(self):
' Plot convolution result to show angular distribution spread after\n each scattering event.'
for n in [0, 1, 2, 5, 10, 20, 50]:
plt.plot(self.emitted_elctn_x, self.centered_data[n], label=str(n))
plt.xticks([(- 90), (- 60), (- 30), 0, 30, 60, 90])
plt.xlabel('theta (degrees)')
plt.ylabel('Intensity (a.u.)')
plt.title('Angular distribution per scattering event')
plt.legend(title='No. of iterations', loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
|
def limit_by_acceptance_angle(self):
' Limit the data to the acceptance angle of the analyser '
self.angle_limited = self._limit_by_constant_angle(self.centered_data, self.acceptance_angle)
| -5,836,222,258,590,825,000
|
Limit the data to the acceptance angle of the analyser
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
limit_by_acceptance_angle
|
surfaceanalytics/inelasticscattering
|
python
|
def limit_by_acceptance_angle(self):
' '
self.angle_limited = self._limit_by_constant_angle(self.centered_data, self.acceptance_angle)
|
def plot_angle_limited(self):
' Plot the convolution results only in the accepted angle range'
for n in [0, 1, 2, 5, 10, 20, 50]:
plt.plot(self.emitted_elctn_x, self.angle_limited[n], label=str(n))
plt.xticks([(- 90), (- 60), (- 30), 0, 30, 60, 90])
plt.xlabel('theta (degrees)')
plt.ylabel('Intensity (a.u.)')
plt.title('Intensity distribution after scattering event')
plt.legend(title='No. of iterations', loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
| 5,782,635,014,783,489,000
|
Plot the convolution results only in the accepted angle range
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
plot_angle_limited
|
surfaceanalytics/inelasticscattering
|
python
|
def plot_angle_limited(self):
' '
for n in [0, 1, 2, 5, 10, 20, 50]:
plt.plot(self.emitted_elctn_x, self.angle_limited[n], label=str(n))
plt.xticks([(- 90), (- 60), (- 30), 0, 30, 60, 90])
plt.xlabel('theta (degrees)')
plt.ylabel('Intensity (a.u.)')
plt.title('Intensity distribution after scattering event')
plt.legend(title='No. of iterations', loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
|
def calc_area_under_curve(self):
' Calculate area under each curve within acceptance angle,\n represents intensity that the detector sees'
sin = np.absolute(np.sin(((np.arange((- 90), 90, 1) * np.pi) / 180)))
angle_integrated = ((self.angle_limited * sin) * np.pi)
self.area_sum = np.sum(angle_integrated, axis=1)
self.area_sum = (self.area_sum / self.area_sum[0])
return self.area_sum
| -5,156,297,809,877,121,000
|
Calculate area under each curve within acceptance angle,
represents intensity that the detector sees
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
calc_area_under_curve
|
surfaceanalytics/inelasticscattering
|
python
|
def calc_area_under_curve(self):
' Calculate area under each curve within acceptance angle,\n represents intensity that the detector sees'
sin = np.absolute(np.sin(((np.arange((- 90), 90, 1) * np.pi) / 180)))
angle_integrated = ((self.angle_limited * sin) * np.pi)
self.area_sum = np.sum(angle_integrated, axis=1)
self.area_sum = (self.area_sum / self.area_sum[0])
return self.area_sum
|
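The |sin(theta)| * pi weighting approximates integrating an azimuthally symmetric angular distribution f(theta) over the sphere, so the detected intensity is proportional to the sum of f(theta)|sin(theta)| over the accepted bins. A standalone restatement of the same computation (1-degree bins; constant factors are absorbed by the final normalization):

import numpy as np

theta = np.arange(-90, 90, 1)                        # 1-degree bins, as above
weights = np.pi * np.abs(np.sin(np.deg2rad(theta)))

def detected_intensity(angle_limited: np.ndarray) -> np.ndarray:
    # rows = scattering events, columns = angular bins
    area = np.sum(angle_limited * weights, axis=1)
    return area / area[0]                            # normalize to the n=0 curve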
def plot_area_under_curve(self):
' Plot area under curve per scattering event / iteration '
plt.plot(self.area_sum)
plt.title((((('area under curve \n (Energy: ' + str(self.energy)) + ', Acceptance Angle: ') + str(self.acceptance_angle)) + ')'))
plt.xlabel('No. of iterations')
plt.ylabel('Intensity a.u.')
plt.show()
| -2,997,363,733,917,999,000
|
Plot area under curve per scattering event / iteration
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
plot_area_under_curve
|
surfaceanalytics/inelasticscattering
|
python
|
def plot_area_under_curve(self):
' '
plt.plot(self.area_sum)
plt.title((((('area under curve \n (Energy: ' + str(self.energy)) + ', Acceptance Angle: ') + str(self.acceptance_angle)) + ')'))
plt.xlabel('No. of iterations')
plt.ylabel('Intensity a.u.')
plt.show()
|
def calc_area_ratio(self):
' Calculate the change in area ratio between iteration n and n-1'
self.area_ratio_list = self._area_ratio_change(self.area_sum)
return self.area_ratio_list
| -6,407,582,401,457,896,000
|
Calculate the change in area ratio between iteration n and n-1
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
calc_area_ratio
|
surfaceanalytics/inelasticscattering
|
python
|
def calc_area_ratio(self):
' '
self.area_ratio_list = self._area_ratio_change(self.area_sum)
return self.area_ratio_list
|
def plot_area_ratio(self):
' Plot the change in area ratio per iteration '
plt.plot(self.area_ratio_list)
plt.title((((('Intensity ratio change per iteration \n (Energy: ' + str(self.energy)) + ' eV, Acceptance Angle: ') + str(self.acceptance_angle)) + ')'))
plt.xlabel('Iterations')
plt.ylabel('Area Ratio between iterations')
plt.show()
| -7,336,764,495,966,653,000
|
Plot the change in area ratio per iteration
|
model/algorithms/legacy/angular_spread_lorentzian.py
|
plot_area_ratio
|
surfaceanalytics/inelasticscattering
|
python
|
def plot_area_ratio(self):
' '
plt.plot(self.area_ratio_list)
plt.title((((('Intensity ratio change per iteration \n (Energy: ' + str(self.energy)) + ' eV, Acceptance Angle: ') + str(self.acceptance_angle)) + ')'))
plt.xlabel('Iterations')
plt.ylabel('Area Ratio between iterations')
plt.show()
|
def __eq__(self, *args):
' x.__eq__(y) <==> x==y '
pass
| 2,144,965,521,805,394,200
|
x.__eq__(y) <==> x==y
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/Domain.py
|
__eq__
|
BCSharp/ironpython-stubs
|
python
|
def __eq__(self, *args):
' '
pass
|
def __format__(self, *args):
' __format__(formattable: IFormattable,format: str) -> str '
pass
| -4,894,195,495,142,889,000
|
__format__(formattable: IFormattable,format: str) -> str
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/Domain.py
|
__format__
|
BCSharp/ironpython-stubs
|
python
|
def __format__(self, *args):
' '
pass
|
def __init__(self, *args):
' x.__init__(...) initializes x; see x.__class__.__doc__ for signature '
pass
| -90,002,593,062,007,400
|
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/Domain.py
|
__init__
|
BCSharp/ironpython-stubs
|
python
|
def __init__(self, *args):
' '
pass
|
def run(process, *args, **inputs):
'\n Run the process with the supplied inputs in a local runner that will block until the process is completed.\n The return value will be the results of the completed process\n\n :param process: the process class or workfunction to run\n :param inputs: the inputs to be passed to the process\n :return: the outputs of the process\n '
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run(process, *args, **inputs)
| -9,197,858,556,187,132,000
|
Run the process with the supplied inputs in a local runner that will block until the process is completed.
The return value will be the results of the completed process
:param process: the process class or workfunction to run
:param inputs: the inputs to be passed to the process
:return: the outputs of the process
|
aiida/work/launch.py
|
run
|
JuDFTteam/aiida_core
|
python
|
def run(process, *args, **inputs):
'\n Run the process with the supplied inputs in a local runner that will block until the process is completed.\n The return value will be the results of the completed process\n\n :param process: the process class or workfunction to run\n :param inputs: the inputs to be passed to the process\n :return: the outputs of the process\n '
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run(process, *args, **inputs)
|
def run_get_node(process, *args, **inputs):
'\n Run the process with the supplied inputs in a local runner that will block until the process is completed.\n The return value will be the results of the completed process\n\n :param process: the process class or workfunction to run\n :param inputs: the inputs to be passed to the process\n :return: tuple of the outputs of the process and the calculation node\n '
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run_get_node(process, *args, **inputs)
| 282,945,995,080,449,020
|
Run the process with the supplied inputs in a local runner that will block until the process is completed.
The return value will be the results of the completed process
:param process: the process class or workfunction to run
:param inputs: the inputs to be passed to the process
:return: tuple of the outputs of the process and the calculation node
|
aiida/work/launch.py
|
run_get_node
|
JuDFTteam/aiida_core
|
python
|
def run_get_node(process, *args, **inputs):
'\n Run the process with the supplied inputs in a local runner that will block until the process is completed.\n The return value will be the results of the completed process\n\n :param process: the process class or workfunction to run\n :param inputs: the inputs to be passed to the process\n :return: tuple of the outputs of the process and the calculation node\n '
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run_get_node(process, *args, **inputs)
|
def run_get_pid(process, *args, **inputs):
'\n Run the process with the supplied inputs in a local runner that will block until the process is completed.\n The return value will be the results of the completed process\n\n :param process: the process class or workfunction to run\n :param inputs: the inputs to be passed to the process\n :return: tuple of the outputs of the process and process pid\n '
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run_get_pid(process, *args, **inputs)
| -3,261,614,733,050,708,500
|
Run the process with the supplied inputs in a local runner that will block until the process is completed.
The return value will be the results of the completed process
:param process: the process class or workfunction to run
:param inputs: the inputs to be passed to the process
:return: tuple of the outputs of the process and process pid
|
aiida/work/launch.py
|
run_get_pid
|
JuDFTteam/aiida_core
|
python
|
def run_get_pid(process, *args, **inputs):
'\n Run the process with the supplied inputs in a local runner that will block until the process is completed.\n The return value will be the results of the completed process\n\n :param process: the process class or workfunction to run\n :param inputs: the inputs to be passed to the process\n :return: tuple of the outputs of the process and process pid\n '
if isinstance(process, processes.Process):
runner = process.runner
else:
runner = manager.AiiDAManager.get_runner()
return runner.run_get_pid(process, *args, **inputs)
|
def submit(process, **inputs):
'\n Submit the process with the supplied inputs to the daemon runners immediately returning control to\n the interpreter. The return value will be the calculation node of the submitted process.\n\n :param process: the process class to submit\n :param inputs: the inputs to be passed to the process\n :return: the calculation node of the process\n '
assert (not utils.is_workfunction(process)), 'Cannot submit a workfunction'
runner = manager.AiiDAManager.get_runner()
controller = manager.AiiDAManager.get_process_controller()
process = processes.instantiate_process(runner, process, **inputs)
runner.persister.save_checkpoint(process)
process.close()
controller.continue_process(process.pid, nowait=False, no_reply=True)
return process.calc
| 5,516,377,438,263,609,000
|
Submit the process with the supplied inputs to the daemon runners immediately returning control to
the interpreter. The return value will be the calculation node of the submitted process.
:param process: the process class to submit
:param inputs: the inputs to be passed to the process
:return: the calculation node of the process
|
aiida/work/launch.py
|
submit
|
JuDFTteam/aiida_core
|
python
|
def submit(process, **inputs):
'\n Submit the process with the supplied inputs to the daemon runners immediately returning control to\n the interpreter. The return value will be the calculation node of the submitted process.\n\n :param process: the process class to submit\n :param inputs: the inputs to be passed to the process\n :return: the calculation node of the process\n '
assert (not utils.is_workfunction(process)), 'Cannot submit a workfunction'
runner = manager.AiiDAManager.get_runner()
controller = manager.AiiDAManager.get_process_controller()
process = processes.instantiate_process(runner, process, **inputs)
runner.persister.save_checkpoint(process)
process.close()
controller.continue_process(process.pid, nowait=False, no_reply=True)
return process.calc
|
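run, run_get_node, run_get_pid and submit form this module's launch API: the run variants block in a local runner until the process finishes, while submit hands the process to the daemon and returns its calculation node immediately. A usage sketch for this pre-1.0 aiida layout (SomeWorkChain and its inputs are hypothetical):

from aiida.work.launch import run, run_get_node, submit

outputs = run(SomeWorkChain, x=1, y=2)                # blocks until finished
outputs, node = run_get_node(SomeWorkChain, x=1, y=2)
calc_node = submit(SomeWorkChain, x=1, y=2)           # returns at once; daemon runs it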
def __init__(self, env: gym.Env, combined_observation_space: Tuple[(Tuple[(int, int, int)], int)], lr: float, gamma: float, epsilon: float, epsilon_decay: float, target_update_interval: int=100, log_wandb: bool=False, replay_buffer: Optional[ReplayBuffer]=None, fc_layers: Optional[List[int]]=None, conv_layers: Optional[List[int]]=None):
"\n Construct a new 'Deep Q-Network' object.\n\n :param env: The environment of the game\n :param lr: The learning rate of the agent\n :param gamma: The amount of weight it gives to future rewards in the value function\n :param epsilon: The probability where we do not go with the “greedy” action with the highest Q-value but rather choose a random action\n :param epsilon_decay: The rate by which epsilon decreases after an episode\n :param target_update_interval: The interval between updates of the target network\n :param replay_buffer: Replay memory object to store and sample observations from for training.\n Defaults to double-end queue with maximum length of 500_000 steps.\n "
self.log_wandb = log_wandb
self.env = env
self.action_space = env.action_space
self.combined_observation_space = combined_observation_space
self.lr = lr
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.target_update_interval = target_update_interval
self.rewards_list = []
self.buffer = (replay_buffer if replay_buffer else ReplayBuffer(maxlen=2500))
self.batch_size = 64
self.epsilon_min = 0.01
self.num_action_space = 4
self.fc_layers = ([128, 128, 128] if (not fc_layers) else fc_layers)
assert (len(self.fc_layers) >= 1), 'You need at least one hidden layer'
self.conv_layers = ([32, 64, 128] if (not conv_layers) else conv_layers)
assert (len(self.conv_layers) >= 1), 'You need at least one hidden layer'
self.model = self.initialize_model()
self.model_target = clone_model(self.model)
if self.log_wandb:
wandb.config.update({'lr': self.lr, 'gamma': self.gamma, 'epsilon': self.epsilon, 'epsilon_decay': self.epsilon_decay, 'target_update_interval': self.target_update_interval, 'batch_size': self.batch_size, 'fc_layers': self.fc_layers})
| -1,705,906,103,581,523,700
|
Construct a new 'Deep Q-Network' object.
:param env: The environment of the game
:param lr: The learning rate of the agent
:param gamma: The amount of weight it gives to future rewards in the value function
:param epsilon: The probability where we do not go with the “greedy” action with the highest Q-value but rather choose a random action
:param epsilon_decay: The rate by which epsilon decreases after an episode
:param target_update_interval: The interval between updates of the target network
:param replay_buffer: Replay memory object to store and sample observations from for training.
Defaults to double-end queue with maximum length of 500_000 steps.
|
RL/Snake-DQN/model/dqn_engineered.py
|
__init__
|
kiritowu/Deep-Learning
|
python
|
def __init__(self, env: gym.Env, combined_observation_space: Tuple[(Tuple[(int, int, int)], int)], lr: float, gamma: float, epsilon: float, epsilon_decay: float, target_update_interval: int=100, log_wandb: bool=False, replay_buffer: Optional[ReplayBuffer]=None, fc_layers: Optional[List[int]]=None, conv_layers: Optional[List[int]]=None):
"\n Construct a new 'Deep Q-Network' object.\n\n :param env: The environment of the game\n :param lr: The learning rate of the agent\n :param gamma: The amount of weight it gives to future rewards in the value function\n :param epsilon: The probability where we do not go with the “greedy” action with the highest Q-value but rather choose a random action\n :param epsilon_decay: The rate by which epsilon decreases after an episode\n :param target_update_interval: The interval between updates of the target network\n :param replay_buffer: Replay memory object to store and sample observations from for training.\n Defaults to double-end queue with maximum length of 500_000 steps.\n "
self.log_wandb = log_wandb
self.env = env
self.action_space = env.action_space
self.combined_observation_space = combined_observation_space
self.lr = lr
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.target_update_interval = target_update_interval
self.rewards_list = []
self.buffer = (replay_buffer if replay_buffer else ReplayBuffer(maxlen=2500))
self.batch_size = 64
self.epsilon_min = 0.01
self.num_action_space = 4
self.fc_layers = ([128, 128, 128] if (not fc_layers) else fc_layers)
assert (len(self.fc_layers) >= 1), 'You need at least one hidden layer'
self.conv_layers = ([32, 64, 128] if (not conv_layers) else conv_layers)
assert (len(self.conv_layers) >= 1), 'You need at least one hidden layer'
self.model = self.initialize_model()
self.model_target = clone_model(self.model)
if self.log_wandb:
wandb.config.update({'lr': self.lr, 'gamma': self.gamma, 'epsilon': self.epsilon, 'epsilon_decay': self.epsilon_decay, 'target_update_interval': self.target_update_interval, 'batch_size': self.batch_size, 'fc_layers': self.fc_layers})
|
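The constructor stores epsilon, epsilon_decay and epsilon_min, but the decay itself happens outside this excerpt. The fields imply the usual multiplicative schedule with a floor, sketched below; the per-episode placement is an assumption:

# Assumed per-episode update implied by the stored fields.
agent.epsilon = max(agent.epsilon * agent.epsilon_decay, agent.epsilon_min)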
def build_wrapper(img_size: types_of_loco.input_img_size=28, channels: int=3, model_name: str='model1', optimizer: Optimizer=SGD()) -> Union[(ModelBuilder, pytorch_builder.PytorchModelBuilder)]:
'\n Returns a function that builds the model\n Used as a wrapper when running cross-validation\n :param img_size:\n :param channels:\n :param model_name:\n :param optimizer:\n :return:\n '
if callable(optimizer):
return pytorch_builder.PytorchModelBuilder(img_size=img_size, channels=channels, model_name=model_name, opt_builder=optimizer)
return keras_builder.build_wrapper(img_size, channels, model_name, optimizer)
| -802,269,240,548,085,500
|
Returns a function that builds the model
Used as a wrapper when running cross-validation
:param img_size:
:param channels:
:param model_name:
:param optimizer:
:return:
|
network_model/model_builder.py
|
build_wrapper
|
Tetuwo181/ModelLearner
|
python
|
def build_wrapper(img_size: types_of_loco.input_img_size=28, channels: int=3, model_name: str='model1', optimizer: Optimizer=SGD()) -> Union[(ModelBuilder, pytorch_builder.PytorchModelBuilder)]:
'\n Returns a function that builds the model\n Used as a wrapper when running cross-validation\n :param img_size:\n :param channels:\n :param model_name:\n :param optimizer:\n :return:\n '
if callable(optimizer):
return pytorch_builder.PytorchModelBuilder(img_size=img_size, channels=channels, model_name=model_name, opt_builder=optimizer)
return keras_builder.build_wrapper(img_size, channels, model_name, optimizer)
|
def builder_of_generator(class_num: int, channels: int=1, optimizer: Optimizer=SGD()):
'\n Create the generator part of the GAN\n :param class_num\n :param channels: number of output color channels (1 for grayscale images)\n :param optimizer: width and height of the 2D convolution window; an integer gives the same value for both\n :return: the discriminator-part model\n '
return builder(class_num, size, channels, optimizer)
| -6,910,396,911,601,175,000
|
Create the generator part of the GAN
:param class_num
:param channels: number of output color channels (1 for grayscale images)
:param optimizer: width and height of the 2D convolution window; an integer gives the same value for both
:return: the discriminator-part model
|
network_model/model_builder.py
|
builder_of_generator
|
Tetuwo181/ModelLearner
|
python
|
def builder_of_generator(class_num: int, channels: int=1, optimizer: Optimizer=SGD()):
'\n Create the generator part of the GAN\n :param class_num\n :param channels: number of output color channels (1 for grayscale images)\n :param optimizer: width and height of the 2D convolution window; an integer gives the same value for both\n :return: the discriminator-part model\n '
return builder(class_num, size, channels, optimizer)
|
def connection_made(self, transport):
'asyncio callback when a connection is opened.'
assert (not self._transport)
logger.debug(('Connected & Listening: %s:%d' % (self.dstaddr, self.dstport)))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None
self.on_open()
| 7,884,721,591,143,972,000
|
asyncio callback when a connection is opened.
|
test/functional/test_framework/mininode.py
|
connection_made
|
BitcoinSN/BitcoinSN
|
python
|
def connection_made(self, transport):
assert (not self._transport)
logger.debug(('Connected & Listening: %s:%d' % (self.dstaddr, self.dstport)))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None
self.on_open()
|
def connection_lost(self, exc):
'asyncio callback when a connection is closed.'
if exc:
logger.warning('Connection lost to {}:{} due to {}'.format(self.dstaddr, self.dstport, exc))
else:
logger.debug(('Closed connection to: %s:%d' % (self.dstaddr, self.dstport)))
self._transport = None
self.recvbuf = b''
self.on_close()
| -3,005,130,395,724,729
|
asyncio callback when a connection is closed.
|
test/functional/test_framework/mininode.py
|
connection_lost
|
BitcoinSN/BitcoinSN
|
python
|
def connection_lost(self, exc):
if exc:
logger.warning('Connection lost to {}:{} due to {}'.format(self.dstaddr, self.dstport, exc))
else:
logger.debug(('Closed connection to: %s:%d' % (self.dstaddr, self.dstport)))
self._transport = None
self.recvbuf = b''
self.on_close()
|
def data_received(self, t):
'asyncio callback when data is read from the socket.'
if (len(t) > 0):
self.recvbuf += t
self._on_data()
| 993,073,361,923,927,400
|
asyncio callback when data is read from the socket.
|
test/functional/test_framework/mininode.py
|
data_received
|
BitcoinSN/BitcoinSN
|
python
|
def data_received(self, t):
if (len(t) > 0):
self.recvbuf += t
self._on_data()
|
def _on_data(self):
'Try to read P2P messages from the recv buffer.\n\n This method reads data from the buffer in a loop. It deserializes,\n parses and verifies the P2P header, then passes the P2P payload to\n the on_message callback for processing.'
try:
while True:
if (len(self.recvbuf) < 4):
return
if (self.recvbuf[:4] != MAGIC_BYTES[self.network]):
raise ValueError(('got garbage %s' % repr(self.recvbuf)))
if (len(self.recvbuf) < (((4 + 12) + 4) + 4)):
return
command = self.recvbuf[4:(4 + 12)].split(b'\x00', 1)[0]
msglen = struct.unpack('<i', self.recvbuf[(4 + 12):((4 + 12) + 4)])[0]
checksum = self.recvbuf[((4 + 12) + 4):(((4 + 12) + 4) + 4)]
if (len(self.recvbuf) < ((((4 + 12) + 4) + 4) + msglen)):
return
msg = self.recvbuf[(((4 + 12) + 4) + 4):((((4 + 12) + 4) + 4) + msglen)]
th = sha256(msg)
h = sha256(th)
if (checksum != h[:4]):
raise ValueError(('got bad checksum ' + repr(self.recvbuf)))
self.recvbuf = self.recvbuf[((((4 + 12) + 4) + 4) + msglen):]
if (command not in MESSAGEMAP):
raise ValueError(("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg))))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message('receive', t)
self.on_message(t)
except Exception as e:
logger.exception('Error reading message: %s', repr(e))
raise
| -8,964,093,991,799,058,000
|
Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing.
|
test/functional/test_framework/mininode.py
|
_on_data
|
BitcoinSN/BitcoinSN
|
python
|
def _on_data(self):
'Try to read P2P messages from the recv buffer.\n\n This method reads data from the buffer in a loop. It deserializes,\n parses and verifies the P2P header, then passes the P2P payload to\n the on_message callback for processing.'
try:
while True:
if (len(self.recvbuf) < 4):
return
if (self.recvbuf[:4] != MAGIC_BYTES[self.network]):
raise ValueError(('got garbage %s' % repr(self.recvbuf)))
if (len(self.recvbuf) < (((4 + 12) + 4) + 4)):
return
command = self.recvbuf[4:(4 + 12)].split(b'\x00', 1)[0]
msglen = struct.unpack('<i', self.recvbuf[(4 + 12):((4 + 12) + 4)])[0]
checksum = self.recvbuf[((4 + 12) + 4):(((4 + 12) + 4) + 4)]
if (len(self.recvbuf) < ((((4 + 12) + 4) + 4) + msglen)):
return
msg = self.recvbuf[(((4 + 12) + 4) + 4):((((4 + 12) + 4) + 4) + msglen)]
th = sha256(msg)
h = sha256(th)
if (checksum != h[:4]):
raise ValueError(('got bad checksum ' + repr(self.recvbuf)))
self.recvbuf = self.recvbuf[((((4 + 12) + 4) + 4) + msglen):]
if (command not in MESSAGEMAP):
raise ValueError(("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg))))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message('receive', t)
self.on_message(t)
except Exception as e:
logger.exception('Error reading message: %s', repr(e))
raise
|
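The inline (4 + 12) + 4 + 4 arithmetic in _on_data encodes the 24-byte P2P wire header: 4 magic bytes, a 12-byte null-padded command, a 4-byte little-endian payload length, and the first 4 bytes of double-SHA256 of the payload as a checksum. A standalone sketch that names those offsets (not part of the framework itself):

import struct

MAGIC_LEN, COMMAND_LEN, LENGTH_LEN, CHECKSUM_LEN = 4, 12, 4, 4
HEADER_LEN = MAGIC_LEN + COMMAND_LEN + LENGTH_LEN + CHECKSUM_LEN  # 24 bytes

def parse_header(buf: bytes):
    # Returns (command, payload_length, checksum) from a complete header.
    assert len(buf) >= HEADER_LEN
    command = buf[4:16].split(b'\x00', 1)[0]
    (msglen,) = struct.unpack('<i', buf[16:20])   # '<i' matches _on_data above
    checksum = buf[20:24]
    return command, msglen, checksum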
def on_message(self, message):
'Callback for processing a P2P payload. Must be overridden by derived class.'
raise NotImplementedError
| -7,141,849,742,548,494,000
|
Callback for processing a P2P payload. Must be overridden by derived class.
|
test/functional/test_framework/mininode.py
|
on_message
|
BitcoinSN/BitcoinSN
|
python
|
def on_message(self, message):
raise NotImplementedError
|
def send_message(self, message):
'Send a P2P message over the socket.\n\n This method takes a P2P payload, builds the P2P header and adds\n the message to the send buffer to be sent over the socket.'
if (not self.is_connected):
raise IOError('Not connected')
self._log_message('send', message)
tmsg = self._build_message(message)
def maybe_write():
if (not self._transport):
return
if (hasattr(self._transport, 'is_closing') and self._transport.is_closing()):
return
self._transport.write(tmsg)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
| -2,728,624,306,352,936,000
|
Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket.
|
test/functional/test_framework/mininode.py
|
send_message
|
BitcoinSN/BitcoinSN
|
python
|
def send_message(self, message):
'Send a P2P message over the socket.\n\n This method takes a P2P payload, builds the P2P header and adds\n the message to the send buffer to be sent over the socket.'
if (not self.is_connected):
raise IOError('Not connected')
self._log_message('send', message)
tmsg = self._build_message(message)
def maybe_write():
if (not self._transport):
return
if (hasattr(self._transport, 'is_closing') and self._transport.is_closing()):
return
self._transport.write(tmsg)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
|
def _build_message(self, message):
'Build a serialized P2P message'
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += (b'\x00' * (12 - len(command)))
tmsg += struct.pack('<I', len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
| -7,292,992,019,461,254,000
|
Build a serialized P2P message
|
test/functional/test_framework/mininode.py
|
_build_message
|
BitcoinSN/BitcoinSN
|
python
|
def _build_message(self, message):
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += (b'\x00' * (12 - len(command)))
tmsg += struct.pack('<I', len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
|
def _log_message(self, direction, msg):
'Logs a message being sent or received over the connection.'
if (direction == 'send'):
log_message = 'Send message to '
elif (direction == 'receive'):
log_message = 'Received message from '
log_message += ('%s:%d: %s' % (self.dstaddr, self.dstport, repr(msg)[:500]))
if (len(log_message) > 500):
log_message += '... (msg truncated)'
logger.debug(log_message)
| 7,418,905,072,498,204,000
|
Logs a message being sent or received over the connection.
|
test/functional/test_framework/mininode.py
|
_log_message
|
BitcoinSN/BitcoinSN
|
python
|
def _log_message(self, direction, msg):
if (direction == 'send'):
log_message = 'Send message to '
elif (direction == 'receive'):
log_message = 'Received message from '
log_message += ('%s:%d: %s' % (self.dstaddr, self.dstport, repr(msg)[:500]))
if (len(log_message) > 500):
log_message += '... (msg truncated)'
logger.debug(log_message)
|
def on_message(self, message):
'Receive message and dispatch message to appropriate callback.\n\n We keep a count of how many of each message type has been received\n and the most recent message of each type.'
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, ('on_' + command))(message)
except:
print(('ERROR delivering %s (%s)' % (repr(message), sys.exc_info()[0])))
raise
| -6,178,374,556,390,762,000
|
Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
|
test/functional/test_framework/mininode.py
|
on_message
|
BitcoinSN/BitcoinSN
|
python
|
def on_message(self, message):
'Receive message and dispatch message to appropriate callback.\n\n We keep a count of how many of each message type has been received\n and the most recent message of each type.'
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, ('on_' + command))(message)
except:
print(('ERROR delivering %s (%s)' % (repr(message), sys.exc_info()[0])))
raise
|
def wait_for_getdata(self, timeout=60):
'Waits for a getdata message.\n\n Receiving any getdata message will satisfy the predicate. the last_message["getdata"]\n value must be explicitly cleared before calling this method, or this will return\n immediately with success. TODO: change this method to take a hash value and only\n return true if the correct block/tx has been requested.'
test_function = (lambda : self.last_message.get('getdata'))
wait_until(test_function, timeout=timeout, lock=mininode_lock)
| -9,031,565,317,313,411,000
|
Waits for a getdata message.
Receiving any getdata message will satisfy the predicate. the last_message["getdata"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block/tx has been requested.
|
test/functional/test_framework/mininode.py
|
wait_for_getdata
|
BitcoinSN/BitcoinSN
|
python
|
def wait_for_getdata(self, timeout=60):
'Waits for a getdata message.\n\n Receiving any getdata message will satisfy the predicate. the last_message["getdata"]\n value must be explicitly cleared before calling this method, or this will return\n immediately with success. TODO: change this method to take a hash value and only\n return true if the correct block/tx has been requested.'
test_function = (lambda : self.last_message.get('getdata'))
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
def wait_for_getheaders(self, timeout=60):
'Waits for a getheaders message.\n\n Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]\n value must be explicitly cleared before calling this method, or this will return\n immediately with success. TODO: change this method to take a hash value and only\n return true if the correct block header has been requested.'
test_function = (lambda : self.last_message.get('getheaders'))
wait_until(test_function, timeout=timeout, lock=mininode_lock)
| -8,589,494,662,717,943,000
|
Waits for a getheaders message.
Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested.
|
test/functional/test_framework/mininode.py
|
wait_for_getheaders
|
BitcoinSN/BitcoinSN
|
python
|
def wait_for_getheaders(self, timeout=60):
'Waits for a getheaders message.\n\n Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]\n value must be explicitly cleared before calling this method, or this will return\n immediately with success. TODO: change this method to take a hash value and only\n return true if the correct block header has been requested.'
test_function = (lambda : self.last_message.get('getheaders'))
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
def wait_for_inv(self, expected_inv, timeout=60):
'Waits for an INV message and checks that the first inv object in the message was as expected.'
if (len(expected_inv) > 1):
raise NotImplementedError('wait_for_inv() will only verify the first inv object')
test_function = (lambda : (self.last_message.get('inv') and (self.last_message['inv'].inv[0].type == expected_inv[0].type) and (self.last_message['inv'].inv[0].hash == expected_inv[0].hash)))
wait_until(test_function, timeout=timeout, lock=mininode_lock)
| -3,942,258,822,374,831,600
|
Waits for an INV message and checks that the first inv object in the message was as expected.
|
test/functional/test_framework/mininode.py
|
wait_for_inv
|
BitcoinSN/BitcoinSN
|
python
|
def wait_for_inv(self, expected_inv, timeout=60):
if (len(expected_inv) > 1):
raise NotImplementedError('wait_for_inv() will only verify the first inv object')
test_function = (lambda : (self.last_message.get('inv') and (self.last_message['inv'].inv[0].type == expected_inv[0].type) and (self.last_message['inv'].inv[0].hash == expected_inv[0].hash)))
wait_until(test_function, timeout=timeout, lock=mininode_lock)
|
def run(self):
'Start the network thread.'
self.network_event_loop.run_forever()
| 9,011,189,419,497,338,000
|
Start the network thread.
|
test/functional/test_framework/mininode.py
|
run
|
BitcoinSN/BitcoinSN
|
python
|
def run(self):
self.network_event_loop.run_forever()
|
def close(self, timeout=10):
'Close the connections and network event loop.'
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until((lambda : (not self.network_event_loop.is_running())), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
| -5,017,505,405,062,556,000
|
Close the connections and network event loop.
|
test/functional/test_framework/mininode.py
|
close
|
BitcoinSN/BitcoinSN
|
python
|
def close(self, timeout=10):
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until((lambda : (not self.network_event_loop.is_running())), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
|
def on_getdata(self, message):
'Check for the tx/block in our stores and if found, reply with an inv message.'
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (((inv.type & MSG_TYPE_MASK) == MSG_TX) and (inv.hash in self.tx_store.keys())):
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (((inv.type & MSG_TYPE_MASK) == MSG_BLOCK) and (inv.hash in self.block_store.keys())):
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
| -3,934,145,937,144,671,000
|
Check for the tx/block in our stores and if found, reply with an inv message.
|
test/functional/test_framework/mininode.py
|
on_getdata
|
BitcoinSN/BitcoinSN
|
python
|
def on_getdata(self, message):
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (((inv.type & MSG_TYPE_MASK) == MSG_TX) and (inv.hash in self.tx_store.keys())):
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (((inv.type & MSG_TYPE_MASK) == MSG_BLOCK) and (inv.hash in self.block_store.keys())):
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
|
def on_getheaders(self, message):
'Search back through our block store for the locator, and reply with a headers message if found.'
(locator, hash_stop) = (message.locator, message.hashstop)
if (not self.block_store):
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while (headers_list[(- 1)].sha256 not in locator.vHave):
prev_block_hash = headers_list[(- 1)].hashPrevBlock
if (prev_block_hash in self.block_store):
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if (prev_block_header.sha256 == hash_stop):
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
headers_list = headers_list[:((- maxheaders) - 1):(- 1)]
response = msg_headers(headers_list)
if (response is not None):
self.send_message(response)
| 6,135,390,170,369,099,000
|
Search back through our block store for the locator, and reply with a headers message if found.
|
test/functional/test_framework/mininode.py
|
on_getheaders
|
BitcoinSN/BitcoinSN
|
python
|
def on_getheaders(self, message):
(locator, hash_stop) = (message.locator, message.hashstop)
if (not self.block_store):
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while (headers_list[(- 1)].sha256 not in locator.vHave):
prev_block_hash = headers_list[(- 1)].hashPrevBlock
if (prev_block_hash in self.block_store):
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if (prev_block_header.sha256 == hash_stop):
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
headers_list = headers_list[:((- maxheaders) - 1):(- 1)]
response = msg_headers(headers_list)
if (response is not None):
self.send_message(response)
|
def on_reject(self, message):
'Store reject reason and code for testing.'
self.reject_code_received = message.code
self.reject_reason_received = message.reason
| 2,698,813,715,860,074,500
|
Store reject reason and code for testing.
|
test/functional/test_framework/mininode.py
|
on_reject
|
BitcoinSN/BitcoinSN
|
python
|
def on_reject(self, message):
self.reject_code_received = message.code
self.reject_reason_received = message.reason
|
def send_blocks_and_test(self, blocks, rpc, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
"Send blocks to test node and test whether the tip advances.\n\n - add all blocks to our block_store\n - send a headers message for the final block\n - the on_getheaders handler will ensure that any getheaders are responded to\n - if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will\n ensure that any getdata messages are responded to\n - if success is True: assert that the node's tip advances to the most recent block\n - if success is False: assert that the node's tip doesn't advance\n - if reject_code and reject_reason are set: assert that the correct reject message is received"
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
self.send_message(msg_headers([CBlockHeader(blocks[(- 1)])]))
if request_block:
wait_until((lambda : (blocks[(- 1)].sha256 in self.getdata_requests)), timeout=timeout, lock=mininode_lock)
if success:
wait_until((lambda : (rpc.getbestblockhash() == blocks[(- 1)].hash)), timeout=timeout)
else:
assert (rpc.getbestblockhash() != blocks[(- 1)].hash)
if (reject_code is not None):
wait_until((lambda : (self.reject_code_received == reject_code)), lock=mininode_lock)
if (reject_reason is not None):
wait_until((lambda : (self.reject_reason_received == reject_reason)), lock=mininode_lock)
| -2,911,765,054,968,182,000
|
Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_code and reject_reason are set: assert that the correct reject message is received
|
test/functional/test_framework/mininode.py
|
send_blocks_and_test
|
BitcoinSN/BitcoinSN
|
python
|
def send_blocks_and_test(self, blocks, rpc, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
"Send blocks to test node and test whether the tip advances.\n\n - add all blocks to our block_store\n - send a headers message for the final block\n - the on_getheaders handler will ensure that any getheaders are responded to\n - if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will\n ensure that any getdata messages are responded to\n - if success is True: assert that the node's tip advances to the most recent block\n - if success is False: assert that the node's tip doesn't advance\n - if reject_code and reject_reason are set: assert that the correct reject message is received"
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
self.send_message(msg_headers([CBlockHeader(blocks[(- 1)])]))
if request_block:
wait_until((lambda : (blocks[(- 1)].sha256 in self.getdata_requests)), timeout=timeout, lock=mininode_lock)
if success:
wait_until((lambda : (rpc.getbestblockhash() == blocks[(- 1)].hash)), timeout=timeout)
else:
assert (rpc.getbestblockhash() != blocks[(- 1)].hash)
if (reject_code is not None):
wait_until((lambda : (self.reject_code_received == reject_code)), lock=mininode_lock)
if (reject_reason is not None):
wait_until((lambda : (self.reject_reason_received == reject_reason)), lock=mininode_lock)
|
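A minimal usage sketch for send_blocks_and_test, assuming a functional test with one node whose P2P connection uses the store-backed class above. `blocks` and `bad_block` are hypothetical CBlock objects with their hashes computed, and the reject code/reason are illustrative.

# Tip should advance to blocks[-1]:
node.p2p.send_blocks_and_test(blocks, node, success=True)
# A malformed block must leave the tip unchanged and trigger a reject:
node.p2p.send_blocks_and_test([bad_block], node, success=False,
                              reject_code=16,                     # REJECT_INVALID
                              reject_reason=b'blk-bad-inputs')    # illustrative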
def send_txs_and_test(self, txs, rpc, success=True, expect_disconnect=False, reject_code=None, reject_reason=None):
"Send txs to test node and test whether they're accepted to the mempool.\n\n - add all txs to our tx_store\n - send tx messages for all txs\n - if success is True/False: assert that the txs are/are not accepted to the mempool\n - if expect_disconnect is True: Skip the sync with ping\n - if reject_code and reject_reason are set: assert that the correct reject message is received."
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for tx in txs:
self.tx_store[tx.sha256] = tx
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = rpc.getrawmempool()
if success:
for tx in txs:
assert (tx.hash in raw_mempool), '{} not found in mempool'.format(tx.hash)
else:
for tx in txs:
assert (tx.hash not in raw_mempool), '{} tx found in mempool'.format(tx.hash)
if (reject_code is not None):
wait_until((lambda : (self.reject_code_received == reject_code)), lock=mininode_lock)
if (reject_reason is not None):
wait_until((lambda : (self.reject_reason_received == reject_reason)), lock=mininode_lock)
| -4,979,453,914,077,187,000
|
Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_code and reject_reason are set: assert that the correct reject message is received.
|
test/functional/test_framework/mininode.py
|
send_txs_and_test
|
BitcoinSN/BitcoinSN
|
python
|
def send_txs_and_test(self, txs, rpc, success=True, expect_disconnect=False, reject_code=None, reject_reason=None):
"Send txs to test node and test whether they're accepted to the mempool.\n\n - add all txs to our tx_store\n - send tx messages for all txs\n - if success is True/False: assert that the txs are/are not accepted to the mempool\n - if expect_disconnect is True: Skip the sync with ping\n - if reject_code and reject_reason are set: assert that the correct reject message is received."
with mininode_lock:
self.reject_code_received = None
self.reject_reason_received = None
for tx in txs:
self.tx_store[tx.sha256] = tx
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = rpc.getrawmempool()
if success:
for tx in txs:
assert (tx.hash in raw_mempool), '{} not found in mempool'.format(tx.hash)
else:
for tx in txs:
assert (tx.hash not in raw_mempool), '{} tx found in mempool'.format(tx.hash)
if (reject_code is not None):
wait_until((lambda : (self.reject_code_received == reject_code)), lock=mininode_lock)
if (reject_reason is not None):
wait_until((lambda : (self.reject_reason_received == reject_reason)), lock=mininode_lock)
|
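A companion sketch for the mempool path; `tx_valid` and `tx_double_spend` are hypothetical CTransaction objects with sha256/hash already computed.

node.p2p.send_txs_and_test([tx_valid], node, success=True)
# A conflicting spend should be kept out of the mempool:
node.p2p.send_txs_and_test([tx_double_spend], node, success=False)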
def run(self, s):
'\n :param s: input in string format\n :return: solution flag\n '
(_, buses) = s.split('\n')
buses = [((k % int(n)), int(n)) for (k, n) in enumerate(buses.split(',')) if (n != 'x')]
(_, base) = buses[0]
multiplier = base
for (rest, b) in buses[1:]:
k = 1
while (((base + (multiplier * k)) % b) != (b - rest)):
k += 1
base = (base + (multiplier * k))
multiplier = (multiplier * b)
return base
| 33,742,449,778,658,504
|
:param s: input in string format
:return: solution flag
|
day-13/part-2/coco.py
|
run
|
david-ds/adventofcode-2020
|
python
|
def run(self, s):
'\n :param s: input in string format\n :return: solution flag\n '
(_, buses) = s.split('\n')
buses = [((k % int(n)), int(n)) for (k, n) in enumerate(buses.split(',')) if (n != 'x')]
(_, base) = buses[0]
multiplier = base
for (rest, b) in buses[1:]:
k = 1
while (((base + (multiplier * k)) % b) != (b - rest)):
k += 1
base = (base + (multiplier * k))
multiplier = (multiplier * b)
return base
|
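The solver above sieves for the earliest timestamp t satisfying t ≡ -offset (mod bus) for every listed bus, multiplying the step by each modulus once its constraint holds; since the puzzle's bus ids are pairwise coprime, this is a constructive Chinese Remainder Theorem. A self-contained restatement, checked against the published example. Note one edge case in the stored version: it compares t % b against (b - rest), which can never match when rest == 0, so a later bus whose offset is a multiple of its id would loop forever; the sketch tests (t + k) % b == 0 directly instead.

def earliest_timestamp(schedule):
    t, step = 0, 1
    for k, n in enumerate(schedule.split(',')):
        if n == 'x':
            continue
        b = int(n)
        while (t + k) % b != 0:   # advance until t = -k (mod b)
            t += step
        step *= b                 # moduli are pairwise coprime, so step = lcm
    return t

# Worked example from Advent of Code 2020 day 13:
assert earliest_timestamp('7,13,x,x,59,x,31,19') == 1068781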
def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):
'Compares dtypes across JAX and TF dtypes. Overrides super method.'
def to_numpy_dtype(dt):
return (dt if isinstance(dt, np.dtype) else dt.as_numpy_dtype)
if ((not config.FLAGS.jax_enable_x64) and canonicalize_dtypes):
self.assertEqual(dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(x))), dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(y))))
else:
self.assertEqual(to_numpy_dtype(jtu._dtype(x)), to_numpy_dtype(jtu._dtype(y)))
| 7,438,900,400,489,860,000
|
Compares dtypes across JAX and TF dtypes. Overrides super method.
|
jax/experimental/jax2tf/tests/tf_test_util.py
|
assertDtypesMatch
|
BuddenD/jax
|
python
|
def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):
def to_numpy_dtype(dt):
return (dt if isinstance(dt, np.dtype) else dt.as_numpy_dtype)
if ((not config.FLAGS.jax_enable_x64) and canonicalize_dtypes):
self.assertEqual(dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(x))), dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(y))))
else:
self.assertEqual(to_numpy_dtype(jtu._dtype(x)), to_numpy_dtype(jtu._dtype(y)))
|
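Why the branch on jax_enable_x64 above matters: with x64 disabled (JAX's default), canonicalisation maps 64-bit dtypes to their 32-bit counterparts, so a float64 TF result is considered to match a float32 JAX result. A minimal sketch of that mapping; the printed values assume x64 mode is off.

import numpy as np
from jax import dtypes

print(dtypes.canonicalize_dtype(np.dtype('float64')))  # float32 when x64 is disabled
print(dtypes.canonicalize_dtype(np.dtype('int64')))    # int32 when x64 is disabled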
def ConvertAndCompare(self, func_jax: Callable, *args, with_function: bool=False, atol=None, rtol=None) -> Tuple[(Any, Any)]:
'Compares jax_func(*args) with convert(jax_func)(*args).'
func_tf = jax2tf.convert(func_jax)
if with_function:
func_tf = tf.function(func_tf)
res_jax = func_jax(*args)
res_tf = func_tf(*args)
self.assertAllClose(res_jax, res_tf, atol=atol, rtol=rtol)
return (res_jax, res_tf)
| 6,940,974,501,894,013,000
|
Compares jax_func(*args) with convert(jax_func)(*args).
|
jax/experimental/jax2tf/tests/tf_test_util.py
|
ConvertAndCompare
|
BuddenD/jax
|
python
|
def ConvertAndCompare(self, func_jax: Callable, *args, with_function: bool=False, atol=None, rtol=None) -> Tuple[(Any, Any)]:
func_tf = jax2tf.convert(func_jax)
if with_function:
func_tf = tf.function(func_tf)
res_jax = func_jax(*args)
res_tf = func_tf(*args)
self.assertAllClose(res_jax, res_tf, atol=atol, rtol=rtol)
return (res_jax, res_tf)
|
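A hypothetical test method built on the helper above; jnp.sin and the input shape are arbitrary choices, and with_function=True additionally exercises the tf.function-wrapped path.

import numpy as np
import jax.numpy as jnp

def test_sin(self):  # assumed to live in a subclass of the test-util case above
    res_jax, res_tf = self.ConvertAndCompare(
        jnp.sin, np.ones((3,), np.float32), with_function=True)
    # Closeness has already been asserted; the returned pair can back
    # further checks, e.g. on dtypes.
    self.assertDtypesMatch(res_jax, res_tf)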
def _run_one_off_job(self):
'Runs the one-off MapReduce job.'
job_id = activity_jobs_one_off.ActivityContributorsSummaryOneOffJob.create_new()
activity_jobs_one_off.ActivityContributorsSummaryOneOffJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.ActivityContributorsSummaryOneOffJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return eval_output
| -6,288,681,077,837,944,000
|
Runs the one-off MapReduce job.
|
core/domain/activity_jobs_one_off_test.py
|
_run_one_off_job
|
AnanyaNegi/oppia
|
python
|
def _run_one_off_job(self):
job_id = activity_jobs_one_off.ActivityContributorsSummaryOneOffJob.create_new()
activity_jobs_one_off.ActivityContributorsSummaryOneOffJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.ActivityContributorsSummaryOneOffJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
return eval_output
|
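A sketch of how a test might consume the helper above; the concrete keys and counts depend on the fixtures each test sets up, so only the parsed shape is asserted here.

def test_output_shape(self):  # hypothetical test method
    output = self._run_one_off_job()
    # get_output returns stringified [key, value] pairs; after
    # ast.literal_eval each item is a two-element list.
    for item in output:
        self.assertEqual(len(item), 2)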
def _run_one_off_job(self):
'Runs the one-off MapReduce job.'
job_id = activity_jobs_one_off.AuditContributorsOneOffJob.create_new()
activity_jobs_one_off.AuditContributorsOneOffJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.AuditContributorsOneOffJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
for item in eval_output:
if isinstance(item[1], list):
item[1] = [ast.literal_eval(triple) for triple in item[1]]
return eval_output
| 5,256,973,170,232,315,000
|
Runs the one-off MapReduce job.
|
core/domain/activity_jobs_one_off_test.py
|
_run_one_off_job
|
AnanyaNegi/oppia
|
python
|
def _run_one_off_job(self):
job_id = activity_jobs_one_off.AuditContributorsOneOffJob.create_new()
activity_jobs_one_off.AuditContributorsOneOffJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_mapreduce_taskqueue(taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = activity_jobs_one_off.AuditContributorsOneOffJob.get_output(job_id)
eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]
for item in eval_output:
if isinstance(item[1], list):
item[1] = [ast.literal_eval(triple) for triple in item[1]]
return eval_output
|
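The audit variant above differs only in a second parsing pass: each value may itself be a list of stringified triples, which are literal-eval'd one level deeper. A standalone demonstration of that unwrapping on an illustrative payload:

import ast

raw = ["[u'DUPLICATE_IDS', [\"(u'uid', 2, [u'exp_1', u'exp_2'])\"]]"]
eval_output = [ast.literal_eval(item) for item in raw]
for item in eval_output:
    if isinstance(item[1], list):
        item[1] = [ast.literal_eval(triple) for triple in item[1]]
print(eval_output)  # [['DUPLICATE_IDS', [('uid', 2, ['exp_1', 'exp_2'])]]]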
def _trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds):
'Record the event to the commit log after the model commit.\n\n Note that this overrides the superclass method.\n\n Args:\n committer_id: str. The user_id of the user who committed the\n change.\n commit_type: str. The type of commit. Possible values are in\n core.storage.base_models.COMMIT_TYPE_CHOICES.\n commit_message: str. The commit description message.\n commit_cmds: list(dict). A list of commands, describing changes\n made in this model, should give sufficient information to\n reconstruct the commit. Each dict always contains:\n cmd: str. Unique command.\n and then additional arguments for that command.\n '
base_models.VersionedModel._trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds)
if (commit_type not in ['create', 'delete']):
collection_models.CollectionCommitLogEntryModel(id=('rights-%s-%s' % (self.id, self.version)), user_id=committer_id, collection_id=self.id, commit_type=commit_type, commit_message=commit_message, commit_cmds=commit_cmds, version=None, post_commit_status=self.status, post_commit_community_owned=self.community_owned, post_commit_is_private=(self.status == constants.ACTIVITY_STATUS_PRIVATE)).put()
| -3,582,706,145,878,410
|
Record the event to the commit log after the model commit.
Note that this overrides the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
|
core/domain/activity_jobs_one_off_test.py
|
_trusted_commit
|
AnanyaNegi/oppia
|
python
|
def _trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds):
'Record the event to the commit log after the model commit.\n\n Note that this overrides the superclass method.\n\n Args:\n committer_id: str. The user_id of the user who committed the\n change.\n commit_type: str. The type of commit. Possible values are in\n core.storage.base_models.COMMIT_TYPE_CHOICES.\n commit_message: str. The commit description message.\n commit_cmds: list(dict). A list of commands, describing changes\n made in this model, should give sufficient information to\n reconstruct the commit. Each dict always contains:\n cmd: str. Unique command.\n and then additional arguments for that command.\n '
base_models.VersionedModel._trusted_commit(self, committer_id, commit_type, commit_message, commit_cmds)
if (commit_type not in ['create', 'delete']):
collection_models.CollectionCommitLogEntryModel(id=('rights-%s-%s' % (self.id, self.version)), user_id=committer_id, collection_id=self.id, commit_type=commit_type, commit_message=commit_message, commit_cmds=commit_cmds, version=None, post_commit_status=self.status, post_commit_community_owned=self.community_owned, post_commit_is_private=(self.status == constants.ACTIVITY_STATUS_PRIVATE)).put()
|
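The commit-log entry written above is skipped for 'create' and 'delete' commits and is keyed by the rights model's id and version. A trivial sketch of that id scheme (string formatting only; values illustrative):

model_id, version = 'col_1', 3
entry_id = 'rights-%s-%s' % (model_id, version)
assert entry_id == 'rights-col_1-3'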