function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def sample_request(request):
    """Render a template containing a Google map with one clickable marker."""
    # Marker at lon/lat (101, 26); clicking it navigates to Google.
    marker = GMarker('POINT(101 26)')
    handler = GEvent('click',
                     'function() { location.href = "http://www.google.com"}')
    marker.add_event(handler)
    context = {'google': GoogleMap(markers=[marker])}
    return render_to_response('mytemplate.html', context)
1,
1,
1,
11,
1449375044
] |
def __init__(self, geom, title=None, draggable=False, icon=None):
        """
        The GMarker object may initialize on GEOS Points or a parameter
        that may be instantiated into a GEOS point. Keyword options map to
        GMarkerOptions -- so far only the title option is supported.
        Keyword Options:
          title:
            Title option for GMarker, will be displayed as a tooltip.
          draggable:
            Draggable option for GMarker, disabled by default.
        """
        # If a GEOS geometry isn't passed in, try to construct one.
        if isinstance(geom, six.string_types):
            geom = fromstr(geom)  # parse WKT such as 'POINT(101 26)'
        if isinstance(geom, (tuple, list)):
            geom = Point(geom)  # coordinate sequence -> GEOS Point
        if isinstance(geom, Point):
            self.latlng = self.latlng_from_coords(geom.coords)
        else:
            # Anything that did not coerce into a Point is rejected.
            raise TypeError('GMarker may only initialize on GEOS Point geometry.')
        # Getting the envelope for automatic zoom determination.
        self.envelope = geom.envelope
        # TODO: Add support for more GMarkerOptions
        self.title = title
        self.draggable = draggable
        self.icon = icon
        super(GMarker, self).__init__()
1,
1,
1,
11,
1449375044
] |
def options(self):
    """Build the GMarkerOptions JavaScript object literal for this marker."""
    # Each entry is emitted only when the corresponding attribute is truthy;
    # icon.varname is only touched when an icon is actually set.
    candidates = (
        'title: "%s"' % self.title if self.title else None,
        'icon: %s' % self.icon.varname if self.icon else None,
        'draggable: true' if self.draggable else None,
    )
    return '{%s}' % ','.join(part for part in candidates if part)
1,
1,
1,
11,
1449375044
] |
def gen_sites():
    """Yield the name of every SLICEM site in the part's grid, in tile order."""
    db = Database(util.get_db_root(), util.get_part())
    grid = db.grid()
    for tile_name in sorted(grid.tiles()):
        gridinfo = grid.gridinfo_at_loc(grid.loc_of_tilename(tile_name))
        for site, site_type in gridinfo.sites.items():
            if site_type == 'SLICEM':
                yield site
48,
12,
48,
18,
1594844148
] |
def loc_cell(name, c, leaf, lut):
    # Emit Tcl commands that bind one leaf LUT cell to a fixed BEL/LOC.
    # Relies on names from the enclosing scope: `root_cell` (cell path
    # prefix), `site` (target site name) and `f` (open output file).
    bel = '{c}{lut}LUT'.format(c=c.upper(), lut=lut)
    print(
        'set {name} [get_cells {root_cell}/{c}lut_i/{leaf}]'.format(
            root_cell=root_cell, name=name, c=c, leaf=leaf),
        file=f)
    print(
        'set_property BEL {bel} ${name}'.format(bel=bel, name=name),
        file=f)
    print(
        'set_property LOC {site} ${name}'.format(site=site, name=name),
        file=f)
    print('', file=f)
48,
12,
48,
18,
1594844148
] |
def __init__(self, client, config, serializer, deserializer):
    # Stash the pipeline client, configuration and the (de)serialization
    # helpers for use by the operation methods below.
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
3526,
2256,
3526,
986,
1335285972
] |
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    # LRO callback for a delete: no body to deserialize, so only route
    # through the caller-supplied `cls` hook (closure) when present.
    return cls(pipeline_response, None, {}) if cls else None
3526,
2256,
3526,
986,
1335285972
] |
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
virtual_network_peering_parameters, # type: "_models.VirtualNetworkPeering"
**kwargs # type: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
virtual_network_peering_parameters, # type: "_models.VirtualNetworkPeering"
**kwargs # type: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    # Deserialize the final LRO body; route it through the optional `cls`
    # customization hook (closure) when one was supplied.
    result = self._deserialize('VirtualNetworkPeering', pipeline_response)
    return cls(pipeline_response, result, {}) if cls else result
3526,
2256,
3526,
986,
1335285972
] |
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def prepare_request(next_link=None):
    # Build the GET request for one page: the first page is constructed
    # from the operation's URL template, later pages reuse the
    # service-supplied continuation link verbatim.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    query_parameters = {}  # type: Dict[str, Any]
    if next_link:
        # Continuation: next_link is already a fully-formed URL.
        request = self._client.get(next_link, query_parameters, header_parameters)
    else:
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(self.list.metadata['url'], **path_format_arguments)  # type: ignore
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
    return request
3526,
2256,
3526,
986,
1335285972
] |
def get_next(next_link=None):
    # Fetch one page of results and fail fast on any non-200 answer.
    pipeline_response = self._client._pipeline.run(
        prepare_request(next_link), stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    return pipeline_response
3526,
2256,
3526,
986,
1335285972
] |
def outline(elements, **kwargs) -> Component:
    """
    Returns Component containing the outlined polygon(s).
    Wraps phidl.geometry.outline.
    Creates an outline around all the polygons passed in the `elements`
    argument. `elements` may be a Device, Polygon, or list of Devices.
    Args:
        elements: Device(/Reference), list of Device(/Reference), or Polygon
            Polygons to outline or Device containing polygons to outline.
    Keyword Args:
        distance: int or float
            Distance to offset polygons. Positive values expand, negative shrink.
        precision: float
            Desired precision for rounding vertex coordinates.
        num_divisions: array-like[2] of int
            The number of divisions with which the geometry is divided into
            multiple rectangular regions. This allows for each region to be
            processed sequentially, which is more computationally efficient.
        join: {'miter', 'bevel', 'round'}
            Type of join used to create the offset polygon.
        tolerance: int or float
            For miter joints, this number must be at least 2 and it represents the
            maximal distance in multiples of offset between new vertices and their
            original position before beveling to avoid spikes at acute joints. For
            round joints, it indicates the curvature resolution in number of
            points per full circle.
        join_first: bool
            Join all paths before offsetting to avoid unnecessary joins in
            adjacent polygon sides.
        max_points: int
            The maximum number of vertices within the resulting polygon.
        open_ports: bool or float
            If not False, holes will be cut in the outline such that the Ports are
            not covered. If True, the holes will have the same width as the Ports.
            If a float, the holes will be widened by that value (useful for fully
            clearing the outline around the Ports for positive-tone processes).
        layer: int, array-like[2], or set
            Specific layer(s) to put polygon geometry on.
    """
    return gf.read.from_phidl(component=pg.outline(elements, **kwargs))
177,
72,
177,
80,
1585200379
] |
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def __init__(self):
        r"""
        :param Data: Encrypted data string generated by the device-side SDK after filling in the test TID parameter
        :type Data: str
        """
        self.Data = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param Pass: Authentication result
        :type Pass: bool
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
        :type RequestId: str
        """
        self.Pass = None
        self.RequestId = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param OrderId: Order number
        :type OrderId: str
        :param Tid: TID number
        :type Tid: str
        """
        self.OrderId = None
        self.Tid = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param Tid: TID for which the receipt was successfully received
        :type Tid: str
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
        :type RequestId: str
        """
        self.Tid = None
        self.RequestId = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param OrderId: Order number
        :type OrderId: str
        :param Tid: TID number
        :type Tid: str
        """
        self.OrderId = None
        self.Tid = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param RemaindCount: Remaining quantity available for blank activation
        :type RemaindCount: int
        :param Tid: TID code that has already been acknowledged
        :type Tid: str
        :param ProductKey: Product public key
        :type ProductKey: str
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
        :type RequestId: str
        """
        self.RemaindCount = None
        self.Tid = None
        self.ProductKey = None
        self.RequestId = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param OrderId: Order ID
        :type OrderId: str
        :param Quantity: Quantity, 1-100
        :type Quantity: int
        """
        self.OrderId = None
        self.Quantity = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
r"""
:param TidSet: 空发的TID信息 | tzpBingo/github-trending | [
42,
20,
42,
1,
1504755582
] |
def _deserialize(self, params):
        # Rebuild the nested TidKeysInfo objects from the raw response dict;
        # scalar fields are copied over directly.
        if params.get("TidSet") is not None:
            self.TidSet = []
            for item in params.get("TidSet"):
                obj = TidKeysInfo()
                obj._deserialize(item)
                self.TidSet.append(obj)
        self.ProductKey = params.get("ProductKey")
        self.RequestId = params.get("RequestId")
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param OrderId: Order number
        :type OrderId: str
        """
        self.OrderId = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param Quantity: Number of white-box keys available for blank activation
        :type Quantity: int
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
        :type RequestId: str
        """
        self.Quantity = None
        self.RequestId = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param EnterpriseUser: Whether the account is an enterprise user
        :type EnterpriseUser: bool
        :param DownloadPermission: Console download permission
        :type DownloadPermission: str
        :param UsePermission: Console usage permission
        :type UsePermission: str
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
        :type RequestId: str
        """
        self.EnterpriseUser = None
        self.DownloadPermission = None
        self.UsePermission = None
        self.RequestId = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param OrderId: Order number
        :type OrderId: str
        :param Quantity: Download quantity: 1-10
        :type Quantity: int
        """
        self.OrderId = None
        self.Quantity = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
r"""
:param TidSet: 下载的TID信息列表 | tzpBingo/github-trending | [
42,
20,
42,
1,
1504755582
] |
def _deserialize(self, params):
        # Rebuild the nested TidKeysInfo objects from the raw response dict.
        if params.get("TidSet") is not None:
            self.TidSet = []
            for item in params.get("TidSet"):
                obj = TidKeysInfo()
                obj._deserialize(item)
                self.TidSet.append(obj)
        self.RequestId = params.get("RequestId")
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param Tid: TID number
        :type Tid: str
        :param PublicKey: Public key
        :type PublicKey: str
        :param PrivateKey: Private key
        :type PrivateKey: str
        :param Psk: Pre-shared key
        :type Psk: str
        :param DownloadUrl: Download URL of the software-hardened white-box key
        :type DownloadUrl: str
        :param DeviceCode: Software-hardened device identifier code
        :type DeviceCode: str
        """
        self.Tid = None
        self.PublicKey = None
        self.PrivateKey = None
        self.Psk = None
        self.DownloadUrl = None
        self.DeviceCode = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param CodeSet: Unique hardware identifier codes
        :type CodeSet: list of str
        :param OrderId: Application order number the hardware identifiers are bound to
        :type OrderId: str
        """
        self.CodeSet = None
        self.OrderId = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
r"""
:param Count: 本次已上传数量
:type Count: int
:param ExistedCodeSet: 重复的硬件唯一标识码 | tzpBingo/github-trending | [
42,
20,
42,
1,
1504755582
] |
def _deserialize(self, params):
        # Copy the scalar response fields straight off the raw dict.
        self.Count = params.get("Count")
        self.ExistedCodeSet = params.get("ExistedCodeSet")
        self.LeftQuantity = params.get("LeftQuantity")
        self.IllegalCodeSet = params.get("IllegalCodeSet")
        self.RequestId = params.get("RequestId")
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param Data: Data to verify
        :type Data: str
        """
        self.Data = None
42,
20,
42,
1,
1504755582
] |
def __init__(self):
        r"""
        :param Pass: Verification result
        :type Pass: bool
        :param VerifiedTimes: Number of completed verifications
        :type VerifiedTimes: int
        :param LeftTimes: Number of remaining verification attempts
        :type LeftTimes: int
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed to locate a problem.
        :type RequestId: str
        """
        self.Pass = None
        self.VerifiedTimes = None
        self.LeftTimes = None
        self.RequestId = None
42,
20,
42,
1,
1504755582
] |
def __init__(self, opt: Opt, shared: PT.TShared = None):
        # '0000' is a sentinel image id whose features are all-blank; it is
        # used whenever a real image is unavailable.
        self.blank_image_id = '0000'
        super().__init__(opt, shared)
        if shared is not None:
            # Secondary workers reuse the parent's set of valid image ids.
            self.valid_image_ids = shared['valid_image_ids']
        if self.image_features_dict is not None:
            self.image_features_dict[self.blank_image_id] = self.blank_image_features
        self.multi_ref = opt.get('igc_multi_ref', False)
9846,
2003,
9846,
72,
1493053844
] |
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None | facebookresearch/ParlAI | [
9846,
2003,
9846,
72,
1493053844
] |
def image_id_to_image_path(self, image_id: str) -> str:
        """
        Return image path given image id.

        Ids that are not among the valid (downloaded) image ids fall back to
        the blank image, so feature building never hits a missing file.

        :param image_id:
            image_id key, for IGC this is a str
        :return:
            the image path associated with the given image key
        """
        if image_id not in self.valid_image_ids:
            image_id = self.blank_image_id
        return os.path.join(self.get_image_path(self.opt), image_id)
9846,
2003,
9846,
72,
1493053844
] |
def get_image_features_path(self, task, image_model_name, dt):
        """
        Override so that subclasses can see same image features.

        The features dict lives directly under ``self.data_path``, so all
        subclasses share one cache instead of per-task copies.
        """
        # In default implementation, self.data_path already has task name added
        image_features_path = os.path.join(self.data_path, 'image_features')
        if not os.path.isdir(image_features_path):
            PathManager.mkdirs(image_features_path)
        return os.path.join(
            image_features_path, f'{image_model_name}_{dt}_features_dict'
        )
9846,
2003,
9846,
72,
1493053844
] |
def num_examples(self) -> int:
    """
    Number of examples.

    Each IGC episode contributes three dialogue turns (context, question,
    response), so the example count is three per underlying data row.
    """
    return len(self.data) * 3
9846,
2003,
9846,
72,
1493053844
] |
def load_data(self, data_path: str, opt: Opt) -> List[Dict[str, Any]]:
        """
        Override to load CSV files.

        Reads the IGC crowd CSV for the requested datatype, downloads images
        when they are missing, carves train/valid 90/10 out of the val file,
        and records which image ids actually exist on disk.
        """
        dt = opt['datatype'].split(':')[0]
        # Only two CSVs ship with IGC: test and val (train is split from val).
        dt_str = 'test' if dt == 'test' else 'val'
        dp = os.path.join(self.get_data_path(opt), f'IGC_crowd_{dt_str}.csv')
        if not os.path.exists(dp):
            raise RuntimeError(
                'Please download the IGC Dataset from '
                'https://www.microsoft.com/en-us/download/details.aspx?id=55324. '
                'Then, make sure to put the two .csv files in {}'.format(
                    self.get_data_path(opt)
                )
            )
        if (
            not os.path.exists(self.get_image_path(opt))
            or len(os.listdir(self.get_image_path(opt))) <= 1
        ):
            self._download_images(opt)
        self.data = []
        with PathManager.open(dp, newline='\n') as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            fields = []
            for i, row in enumerate(reader):
                if i == 0:
                    fields = row  # header row supplies the episode keys
                else:
                    ep = dict(zip(fields, row))
                    ep['image_id'] = f'{ep["id"]}'
                    self.data.append(ep)
            if dt == 'train':
                # Take first 90% of valid set as train
                self.data = self.data[: int(len(self.data) * 0.9)]
            elif dt == 'valid':
                self.data = self.data[int(len(self.data) * 0.9) :]
            # Track which image ids exist on disk; missing ones fall back to
            # the blank image later.
            self.valid_image_ids = []
            for d in self.data:
                img_path = os.path.join(self.get_image_path(opt), d['image_id'])
                if PathManager.exists(img_path):
                    self.valid_image_ids.append(d['image_id'])
            self.valid_image_ids = set(self.valid_image_ids)
        return self.data
9846,
2003,
9846,
72,
1493053844
] |
def share(self) -> PT.TShared:
        """Share the valid image ids with child workers (avoids re-reading the CSV)."""
        shared = super().share()
        shared['valid_image_ids'] = self.valid_image_ids
        return shared
9846,
2003,
9846,
72,
1493053844
] |
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None | facebookresearch/ParlAI | [
9846,
2003,
9846,
72,
1493053844
] |
def num_episodes(self) -> int:
    """Return the number of episodes: one per loaded data row."""
    episode_count = len(self.data)
    return episode_count
9846,
2003,
9846,
72,
1493053844
] |
def get_label_key(self) -> str:
        """
        Return key into data dictionary for the label.

        Abstract hook: concrete subclasses return the CSV column name that
        holds their label turn.
        """
        pass
9846,
2003,
9846,
72,
1493053844
] |
def get_text(self, data) -> str:
        """
        Return text for an example.

        Abstract hook: concrete subclasses build the text from `data`.
        """
        pass
9846,
2003,
9846,
72,
1493053844
] |
def get_label_key(self) -> str:
        """Label column for this teacher: the response turn."""
        return 'response'
9846,
2003,
9846,
72,
1493053844
] |
def get_label_key(self) -> str:
        """Label column for this teacher: the question turn."""
        return 'question'
9846,
2003,
9846,
72,
1493053844
] |
def get_context(self):
        """
        Extend the page context with the styleguide manifest and version info.
        """
        context = super(BasicPage, self).get_context()
        manifest = Manifest()
        # Load the CSS manifest that drives the styleguide display.
        manifest_filepath = os.path.join(settings.SOURCES_DIR, 'css', 'styleguide_manifest.css')
        with io.open(manifest_filepath, 'r') as fp:
            manifest.load(fp)
        context.update({
            'styleguide': manifest,
            'version': sveetoy_version,
            'foundation_version': self.foundation_version,
        })
        return context
1,
1,
1,
7,
1483118147
] |
def get_context(self):
        """Extend the page context with the site-wide sitemap object."""
        context = super(PageWithSitemap, self).get_context()
        context.update({
            'site_sitemap': self.sitemap,
        })
        return context
1,
1,
1,
7,
1483118147
] |
def __init__(self, plotly_name="hoverlabel", parent_name="scattercarpet", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`. | plotly/plotly.py | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, *args, **kwargs):
        # `event` is required and popped before delegating so the base form
        # class never sees it.
        event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        # Restrict the session / track-group choices to the given event.
        self.default_session.query = Session.query.with_parent(event)
        self.track_group.query = TrackGroup.query.with_parent(event)
1446,
358,
1446,
649,
1311774990
] |
def program_render_mode(self):
        """Render mode used for the programme text: Markdown."""
        return RenderMode.markdown
1446,
358,
1446,
649,
1311774990
] |
def _round_if_needed(arr, dtype):
    """Rounds arr inplace if the destination dtype is an integer.

    Args:
        arr(cupy.ndarray): Array rounded in place.
        dtype: Destination dtype; rounding only happens for integer kinds.
    """
    if cupy.issubdtype(dtype, cupy.integer):
        # NOTE(review): the original comment said to use rint because of a
        # round bug (cupy/cupy#2330), yet round() is called here -- confirm.
        arr.round(out=arr)
6731,
672,
6731,
478,
1477994085
] |
def _view_roi(array, original_area_slice, axis):
    """Gets a view of the current region of interest during iterative padding.
    When padding multiple dimensions iteratively corner values are
    unnecessarily overwritten multiple times. This function reduces the
    working area for the first dimensions so that corners are excluded.
    Args:
        array(cupy.ndarray): The array with the region of interest.
        original_area_slice(tuple of slices): Denotes the area with original
            values of the unpadded array.
        axis(int): The currently padded dimension assuming that `axis` is padded
            before `axis` + 1.
    Returns:
        cupy.ndarray: View of `array` where dimensions up to and including
        `axis` are seen in full, while the not-yet-padded dimensions are
        restricted to the original (unpadded) area.
    """
    axis += 1
    # Full slices for the already-processed dimensions, original-area slices
    # for the dimensions still awaiting padding.
    sl = (slice(None),) * axis + original_area_slice[axis:]
    return array[sl]
6731,
672,
6731,
478,
1477994085
] |
def _set_pad_area(padded, axis, width_pair, value_pair):
    """Set an empty-padded area in given dimension.

    Args:
        padded(cupy.ndarray): Array whose pad areas along `axis` are filled.
        axis(int): Dimension in which the pad areas are set.
        width_pair((int, int)): Left/right pad widths along `axis`.
        value_pair: Values written into the left and right pad areas.
    """
    left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
    padded[left_slice] = value_pair[0]
    right_slice = _slice_at_axis(
        slice(padded.shape[axis] - width_pair[1], None), axis
    )
    padded[right_slice] = value_pair[1]
6731,
672,
6731,
478,
1477994085
] |
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
    """Constructs linear ramps for an empty-padded array along a given axis.
    Args:
        padded(cupy.ndarray): Empty-padded array.
        axis(int): Dimension in which the ramps are constructed.
        width_pair((int, int)): Pair of widths that mark the pad area on both
            sides in the given dimension.
        end_value_pair((scalar, scalar)): End values for the linear ramps which
            form the edge of the fully padded array. These values are included in
            the linear ramps.
    Returns:
        (cupy.ndarray, cupy.ndarray): Left and right ramps, each running from
        the given end value towards the corresponding edge value of the
        original area.
    """
    edge_pair = _get_edges(padded, axis, width_pair)
    left_ramp = cupy.linspace(
        start=end_value_pair[0],
        # squeeze axis replaced by linspace
        stop=edge_pair[0].squeeze(axis),
        num=width_pair[0],
        endpoint=False,
        dtype=padded.dtype,
        axis=axis,
    )
    right_ramp = cupy.linspace(
        start=end_value_pair[1],
        # squeeze axis replaced by linspace
        stop=edge_pair[1].squeeze(axis),
        num=width_pair[1],
        endpoint=False,
        dtype=padded.dtype,
        axis=axis,
    )
    # Reverse linear space in appropriate dimension
    right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
    return left_ramp, right_ramp
6731,
672,
6731,
478,
1477994085
] |
def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
    """Pads an `axis` of `arr` using reflection.
    Args:
        padded(cupy.ndarray): Input array of arbitrary shape.
        axis(int): Axis along which to pad `arr`.
        width_pair((int, int)): Pair of widths that mark the pad area on both
            sides in the given dimension.
        method(str): Controls method of reflection; options are 'even' or 'odd'.
        include_edge(bool, optional): If true, edge value is included in
            reflection, otherwise the edge value forms the symmetric axis to the
            reflection. (Default value = False)
    Returns:
        (int, int): Remaining left/right pad widths still to fill; non-zero
        when the original extent was shorter than the requested pad, in which
        case the caller iterates again.
    """
    left_pad, right_pad = width_pair
    old_length = padded.shape[axis] - right_pad - left_pad
    if include_edge:
        # Edge is included, we need to offset the pad amount by 1
        edge_offset = 1
    else:
        edge_offset = 0  # Edge is not included, no need to offset pad amount
        old_length -= 1  # but must be omitted from the chunk
    if left_pad > 0:
        # Pad with reflected values on left side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, left_pad)
        # Slice right to left, stop on or next to edge, start relative to stop
        stop = left_pad - edge_offset
        start = stop + chunk_length
        left_slice = _slice_at_axis(slice(start, stop, -1), axis)
        left_chunk = padded[left_slice]
        if method == 'odd':
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
            left_chunk = 2 * padded[edge_slice] - left_chunk
        # Insert chunk into padded area
        start = left_pad - chunk_length
        stop = left_pad
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = left_chunk
        # Adjust pointer to left edge for next iteration
        left_pad -= chunk_length
    if right_pad > 0:
        # Pad with reflected values on right side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, right_pad)
        # Slice right to left, start on or next to edge, stop relative to start
        start = -right_pad + edge_offset - 2
        stop = start - chunk_length
        right_slice = _slice_at_axis(slice(start, stop, -1), axis)
        right_chunk = padded[right_slice]
        if method == 'odd':
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(
                slice(-right_pad - 1, -right_pad), axis
            )
            right_chunk = 2 * padded[edge_slice] - right_chunk
        # Insert chunk into padded area
        start = padded.shape[axis] - right_pad
        stop = start + chunk_length
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = right_chunk
        # Adjust pointer to right edge for next iteration
        right_pad -= chunk_length
    return left_pad, right_pad
6731,
672,
6731,
478,
1477994085
] |
def _as_pairs(x, ndim, as_index=False):
    """Broadcasts `x` to an array with shape (`ndim`, 2).
    A helper function for `pad` that prepares and validates arguments like
    `pad_width` for iteration in pairs.
    Args:
        x(scalar or array-like, optional): The object to broadcast to the shape
            (`ndim`, 2).
        ndim(int): Number of pairs the broadcasted `x` will have.
        as_index(bool, optional): If `x` is not None, try to round each
            element of `x` to an integer (dtype `cupy.intp`) and ensure every
            element is positive. (Default value = False)
    Returns:
        nested iterables, shape (`ndim`, 2): The broadcasted version of `x`.
    """
    if x is None:
        # Pass through None as a special case, otherwise cupy.round(x) fails
        # with an AttributeError
        return ((None, None),) * ndim
    elif isinstance(x, numbers.Number):
        # Scalar: the same value pads both sides of every dimension.
        if as_index:
            x = round(x)
        return ((x, x),) * ndim
    x = numpy.array(x)
    if as_index:
        x = numpy.asarray(numpy.round(x), dtype=numpy.intp)
    if x.ndim < 3:
        # Optimization: Possibly use faster paths for cases where `x` has
        # only 1 or 2 elements. `numpy.broadcast_to` could handle these as well
        # but is currently slower
        if x.size == 1:
            # x was supplied as a single value
            x = x.ravel()  # Ensure x[0] works for x.ndim == 0, 1, 2
            if as_index and x < 0:
                raise ValueError("index can't contain negative values")
            return ((x[0], x[0]),) * ndim
        if x.size == 2 and x.shape != (2, 1):
            # x was supplied with a single value for each side
            # but except case when each dimension has a single value
            # which should be broadcasted to a pair,
            # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
            x = x.ravel()  # Ensure x[0], x[1] works
            if as_index and (x[0] < 0 or x[1] < 0):
                raise ValueError("index can't contain negative values")
            return ((x[0], x[1]),) * ndim
    if as_index and x.min() < 0:
        raise ValueError("index can't contain negative values")
    # Converting the array with `tolist` seems to improve performance
    # when iterating and indexing the result (see usage in `pad`)
    x_view = x.view()
    x_view.shape = (ndim, 2)
    return x_view.tolist()
6731,
672,
6731,
478,
1477994085
] |
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
provider_name: str,
resource_type: str,
resource_name: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def _configure(
self,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def __init__(
self,
credential: "AsyncTokenCredential",
endpoint: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def convert_cli(
# fmt: off
input_path: str = Arg(..., help="Input file or directory", exists=True),
output_dir: Path = Arg("-", help="Output directory. '-' for stdout.", allow_dash=True, exists=True),
file_type: FileTypes = Opt("spacy", "--file-type", "-t", help="Type of data to produce"),
n_sents: int = Opt(1, "--n-sents", "-n", help="Number of sentences per doc (0 to disable)"),
seg_sents: bool = Opt(False, "--seg-sents", "-s", help="Segment sentences (for -c ner)"),
model: Optional[str] = Opt(None, "--model", "--base", "-b", help="Trained spaCy pipeline for sentence segmentation to use as base (for --seg-sents)"),
morphology: bool = Opt(False, "--morphology", "-m", help="Enable appending morphology to tags"),
merge_subtokens: bool = Opt(False, "--merge-subtokens", "-T", help="Merge CoNLL-U subtokens"),
converter: str = Opt("auto", "--converter", "-c", help=f"Converter: {tuple(CONVERTERS.keys())}"),
ner_map: Optional[Path] = Opt(None, "--ner-map", "-nm", help="NER tag mapping (as JSON-encoded dict of entity types)", exists=True),
lang: Optional[str] = Opt(None, "--lang", "-l", help="Language (if tokenizer required)"),
concatenate: bool = Opt(None, "--concatenate", "-C", help="Concatenate output to a single file"),
# fmt: on | spacy-io/spaCy | [
25459,
4045,
25459,
98,
1404400540
] |
def convert(
input_path: Union[str, Path],
output_dir: Union[str, Path],
*,
file_type: str = "json",
n_sents: int = 1,
seg_sents: bool = False,
model: Optional[str] = None,
morphology: bool = False,
merge_subtokens: bool = False,
converter: str = "auto",
ner_map: Optional[Path] = None,
lang: Optional[str] = None,
concatenate: bool = False,
silent: bool = True,
msg: Optional[Printer], | spacy-io/spaCy | [
25459,
4045,
25459,
98,
1404400540
] |
def _print_docs_to_stdout(data: Any, output_type: str) -> None:
    """Write converted docs to stdout: JSON as text, all other formats as bytes."""
    if output_type == "json":
        srsly.write_json("-", data)
    else:
        sys.stdout.buffer.write(data)
25459,
4045,
25459,
98,
1404400540
] |
def autodetect_ner_format(input_data: str) -> Optional[str]:
    """Guess whether NER data is whitespace-separated ('ner') or pipe-separated
    IOB ('iob') by sampling the first 20 lines; None if ambiguous."""
    iob_pattern = re.compile(r"\S+\|(O|[IB]-\S+)")
    ner_pattern = re.compile(r"\S+\s+(O|[IB]-\S+)$")
    iob_hits = 0
    ner_hits = 0
    for raw_line in input_data.split("\n")[:20]:
        stripped = raw_line.strip()
        if iob_pattern.search(stripped):
            iob_hits += 1
        if ner_pattern.search(stripped):
            ner_hits += 1
    if ner_hits and not iob_hits:
        return "ner"
    if iob_hits and not ner_hits:
        return "iob"
    # Mixed or no evidence: refuse to guess.
    return None
25459,
4045,
25459,
98,
1404400540
] |
def verify_cli_args(
msg: Printer,
input_path: Union[str, Path],
output_dir: Union[str, Path],
file_type: FileTypes,
converter: str,
ner_map: Optional[Path], | spacy-io/spaCy | [
25459,
4045,
25459,
98,
1404400540
] |
def __init__(
self,
credential: "AsyncTokenCredential",
endpoint: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def _configure(
self,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def __init__(
self, plotly_name="namelengthsrc", parent_name="bar.hoverlabel", **kwargs | plotly/plotly.py | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, data=None):
        # Payload stored in this node.
        self.data = data
        # Reference to the next node; None marks the tail of the list.
        self.next = None
154959,
39275,
154959,
147,
1468662241
] |
def make_linked_list(elements_list: list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List.
    >>> make_linked_list([])
    Traceback (most recent call last):
    ...
    Exception: The Elements List is empty
    >>> make_linked_list([7])
    7
    >>> make_linked_list(['abc'])
    abc
    >>> make_linked_list([7, 25])
    7->25
    """
    if not elements_list:
        raise Exception("The Elements List is empty")
    # Seed the list with the first element, then append the rest in order.
    head = Node(elements_list[0])
    tail = head
    for value in elements_list[1:]:
        tail.next = Node(value)
        tail = tail.next
    return head
154959,
39275,
154959,
147,
1468662241
] |
def main():
    """Run the module doctests, then demo printing a linked list in reverse."""
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
154959,
39275,
154959,
147,
1468662241
] |
def __init__(self, text: str, pattern: str):
    """Store the search text and the pattern, caching both lengths."""
    self.text = text
    self.pattern = pattern
    self.textLen = len(text)
    self.patLen = len(pattern)
154959,
39275,
154959,
147,
1468662241
] |
def mismatch_in_text(self, currentPos: int) -> int:
    """
    find the index of mis-matched character in text when compared with pattern
    from last
    Parameters :
    currentPos (int): current index position of text
    Returns :
    i (int): index of mismatched char from last in text
    -1 (int): if there is no mismatch between pattern and text block
    """
    # Walk the pattern right-to-left, mirroring Boyer-Moore's comparison order.
    offset = self.patLen - 1
    while offset >= 0:
        if self.text[currentPos + offset] != self.pattern[offset]:
            return currentPos + offset
        offset -= 1
    return -1
154959,
39275,
154959,
147,
1468662241
] |
def __init__(
self, plotly_name="sizesrc", parent_name="surface.hoverlabel.font", **kwargs | plotly/plotly.py | [
13052,
2308,
13052,
1319,
1385013188
] |
def paste_proxy_patched_call(self, environ, start_response):
    """Forward a WSGI request to the backend HTTP server and relay the reply.

    Appears to be a patched copy of paste.proxy's ``__call__`` (Python 2:
    ``httplib``/``urllib``/``urlparse``) with one synthese-specific addition:
    the client's Host header is forwarded as ``x-forwarded-host``.
    Returns the full response body as a single-element list, per WSGI.
    """
    # Reject requests whose method is not in the configured whitelist.
    if (self.allowed_request_methods and
        environ['REQUEST_METHOD'].lower() not in self.allowed_request_methods):
        return httpexceptions.HTTPBadRequest("Disallowed")(environ, start_response)
    # Choose the httplib connection class matching the backend scheme.
    if self.scheme == 'http':
        ConnClass = httplib.HTTPConnection
    elif self.scheme == 'https':
        ConnClass = httplib.HTTPSConnection
    else:
        raise ValueError(
            "Unknown scheme for %r: %r" % (self.address, self.scheme))
    conn = ConnClass(self.host)
    # Rebuild outgoing HTTP headers from the WSGI environ: HTTP_FOO_BAR
    # becomes "foo-bar"; the Host header and suppressed headers are skipped.
    headers = {}
    for key, value in environ.items():
        if key.startswith('HTTP_'):
            key = key[5:].lower().replace('_', '-')
            if key == 'host' or key in self.suppress_http_headers:
                continue
            headers[key] = value
    headers['host'] = self.host
    if 'REMOTE_ADDR' in environ:
        headers['x-forwarded-for'] = environ['REMOTE_ADDR']
    # synthese modification
    # Preserve the client-facing host so the backend can build correct URLs.
    if 'HTTP_HOST' in environ:
        headers['x-forwarded-host'] = environ['HTTP_HOST']
    # end of synthese modification
    if environ.get('CONTENT_TYPE'):
        headers['content-type'] = environ['CONTENT_TYPE']
    if environ.get('CONTENT_LENGTH'):
        if environ['CONTENT_LENGTH'] == '-1':
            # This is a special case, where the content length is basically undetermined
            body = environ['wsgi.input'].read(-1)
            headers['content-length'] = str(len(body))
        else:
            headers['content-length'] = environ['CONTENT_LENGTH']
            length = int(environ['CONTENT_LENGTH'])
            body = environ['wsgi.input'].read(length)
    else:
        body = ''
    # Build the backend request path: URL-quote PATH_INFO, join it onto the
    # configured base path (if any), then re-append the query string.
    path_info = urllib.quote(environ['PATH_INFO'])
    if self.path:
        request_path = path_info
        if request_path and request_path[0] == '/':
            request_path = request_path[1:]
        path = urlparse.urljoin(self.path, request_path)
    else:
        path = path_info
    if environ.get('QUERY_STRING'):
        path += '?' + environ['QUERY_STRING']
    # Issue the request and relay status + headers back to the WSGI caller.
    conn.request(environ['REQUEST_METHOD'],
                 path,
                 body, headers)
    res = conn.getresponse()
    headers_out = parse_headers(res.msg)
    status = '%s %s' % (res.status, res.reason)
    start_response(status, headers_out)
    # @@: Default?
    # Read exactly content-length bytes when advertised, else read to EOF.
    length = res.getheader('content-length')
    if length is not None:
        body = res.read(int(length))
    else:
        body = res.read()
    conn.close()
    return [body]
25,
5,
25,
1,
1386004630
] |
def __init__(self, env, project):
    """Wire up the proxied daemon, the web application and static servers."""
    self.env = env
    self.proxy_app = Proxy('http://localhost:%s/' % env.c.port)
    # import here to avoid circular dependencies.
    from synthesepy import web
    self.web_app = web.get_application(project=project)
    self.static_apps = [
        (base, static.Cling(path)) for base, path in env.c.static_paths
    ]
25,
5,
25,
1,
1386004630
] |
def add_utf8_header(self, start_response):
    """Wrap *start_response* so bare ``text/html`` responses are tagged UTF-8.

    The wrapper rewrites a ``Content-Type: text/html`` header value to
    ``text/html; charset=UTF-8`` and passes every other header through
    untouched.  Unlike the previous dict round-trip, this keeps duplicate
    headers (e.g. several ``Set-Cookie``) and no longer raises ``KeyError``
    when the response carries no ``Content-Type`` header at all.
    """
    def start_response_wrapper(status, headers):
        patched = []
        for name, value in headers:
            if name == 'Content-Type' and value == 'text/html':
                value = 'text/html; charset=UTF-8'
            patched.append((name, value))
        return start_response(status, patched)
    return start_response_wrapper
25,
5,
25,
1,
1386004630
] |
def __call__(self, environ, start_response):
    """WSGI entry point: route each request to the right sub-application."""
    path = environ['PATH_INFO']
    # Requests under /w/ are served by the bundled web application.
    if path.startswith('/w/'):
        werkzeug.wsgi.pop_path_info(environ)
        return self.web_app(environ, start_response)
    # Convenience redirect for the admin interface.
    if path in ('/admin', '/admin/'):
        return self._redirect(environ, start_response, self.ADMIN_URL)
    # Dynamic content goes to the proxied daemon, with UTF-8 header fixup.
    suffixes = tuple(self.SYNTHESE_SUFFIXES + self.env.c.synthese_suffixes)
    if path.endswith(suffixes):
        return self.proxy_app(environ, self.add_utf8_header(start_response))
    return self._handle_static_files(environ, start_response)
25,
5,
25,
1,
1386004630
] |
def start(env, project):
    """Start the WSGI proxy HTTP server in a background thread.

    Uses paste's threaded httpserver when USE_PASTE_HTTPD is set, otherwise
    falls back to the stdlib reference server.  The server instance is kept
    in the module-level ``wsgi_httpd`` so it can be stopped later.
    """
    global wsgi_httpd
    app = WSGIProxy(env, project)
    if USE_PASTE_HTTPD:
        import paste.httpserver
        # paste's thread pool is noisy at INFO level.
        paste_log = logging.getLogger('paste.httpserver.ThreadPool')
        paste_log.setLevel(logging.WARNING)
        wsgi_httpd = paste.httpserver.serve(
            app, '0.0.0.0', env.c.wsgi_proxy_port, start_loop=False)
    else:
        # Bug fix: WSGIProxy requires both env and project (see its __init__);
        # the previous code called WSGIProxy(env) and raised TypeError here.
        wsgi_httpd = simple_server.make_server(
            '', env.c.wsgi_proxy_port, app)
    log.info('WSGI proxy serving on http://localhost:%s' %
             env.c.wsgi_proxy_port)
    threading.Thread(target=wsgi_httpd.serve_forever).start()
25,
5,
25,
1,
1386004630
] |
def locked():
    """Return a safe HTML ``<img>`` tag showing the closed-padlock icon."""
    icon_url = static('manager/padlock_close.png')
    markup = '<img src="%s" alt="locked" style="border:0px; margin: 0px; padding: 0px"/>' % icon_url
    return mark_safe(markup)
1,
1,
1,
14,
1500044176
] |
def get_field_queryset(self, db, db_field, request):
    """Scope the ``discipline`` choices to the parent journal's type."""
    if db_field.name != "discipline":
        return super().get_field_queryset(db, db_field, request)
    manager = db_field.remote_field.model._default_manager.using(db)
    # Filter the discipline field's queryset based on the parent journal's type.
    if request._obj:
        return manager.filter(type__code=request._obj.type.code)
    # If there is no parent journal (during journal creation), return an empty queryset.
    return manager.none()
15,
6,
15,
15,
1445630709
] |
def __init__(self, *args, **kwargs):
    """Restrict ``year_of_addition`` to the current year plus two years."""
    super().__init__(*args, **kwargs)
    current_year = tz.now().year
    bounds = {"min": current_year, "max": current_year + 2}
    field = self.fields["year_of_addition"]
    field.validators = [
        MinValueValidator(bounds["min"]),
        MaxValueValidator(bounds["max"]),
    ]
    # Mirror the same bounds on the HTML number input.
    field.widget = AdminIntegerFieldWidget(attrs=bounds)
15,
6,
15,
15,
1445630709
] |
def get_form(self, request, obj=None, change=False, **kwargs):
    """Stash the edited journal on the request before building the form."""
    # `JournalDisciplineInline` reads this attribute to scope its queryset.
    request._obj = obj
    return super().get_form(request, obj=obj, change=change, **kwargs)
15,
6,
15,
15,
1445630709
] |
def force_free_access_to_true(self, request, queryset):
    """Admin bulk action: flag every selected issue as open access."""
    # Single UPDATE query; no per-object save() hooks are triggered.
    queryset.update(force_free_access=True)
15,
6,
15,
15,
1445630709
] |
def force_free_access_to_false(self, request, queryset):
    """Admin bulk action: clear the open-access flag on selected issues."""
    # Single UPDATE query; no per-object save() hooks are triggered.
    queryset.update(force_free_access=False)
15,
6,
15,
15,
1445630709
] |
def view_issue_on_site(self, obj):
    """ Display the link leading to the issue on website """
    url_kwargs = {
        "journal_code": obj.journal.code,
        "issue_slug": obj.volume_slug,
        "localidentifier": obj.localidentifier,
    }
    url = reverse("public:journal:issue_detail", kwargs=url_kwargs)
    # Unpublished issues on the main collection need a prepublication ticket.
    if not obj.is_published and obj.journal.collection.is_main_collection:
        url = "{url}?ticket={ticket}".format(url=url, ticket=obj.prepublication_ticket)
    return format_html("<a href={}>{}</a>", url, _("Voir sur le site"))
15,
6,
15,
15,
1445630709
] |
def get_readonly_fields(self, request, obj=None):
    """Expose the configured read-only fields plus ``is_published``."""
    extra_fields = ("is_published",)
    return self.readonly_fields + extra_fields
15,
6,
15,
15,
1445630709
] |
def __init__(self, *args, **kwargs):
    """Build the form, hiding the main languages from the extra-language field."""
    super().__init__(*args, **kwargs)
    # French (id=1) and English (id=2) are handled by the main_languages
    # field, so keep them out of the complementary other_languages choices.
    non_main_languages = Language.objects.exclude(id__in=[1, 2])
    self.fields["other_languages"].queryset = non_main_languages
15,
6,
15,
15,
1445630709
] |
def get_types(self, obj):
    """Return a comma-separated list of the names of *obj*'s types."""
    names = (t.name for t in obj.type.all())
    return ", ".join(names)
15,
6,
15,
15,
1445630709
] |
def setUp(self):
    """Create a Vml instance that writes into an in-memory file handle."""
    self.vml = Vml()
    self.fh = StringIO()
    self.vml._set_filehandle(self.fh)
19,
2,
19,
1,
1398667377
] |
def __init__(self, parent, sourceslist, datadir, distro):
    """
    Initialize the dialog that allows to add a new source entering the
    raw apt line
    """
    self.parent = parent
    self.sourceslist = sourceslist
    self.datadir = datadir
    # gtk stuff: load the dialog layout from the GtkBuilder description.
    setup_ui(self, os.path.join(datadir, "gtkbuilder", "dialog-add.ui"), domain="software-properties")
2,
2,
2,
1,
1434186057
] |
def run(self):
    """Show the dialog; return the entered apt line (newline-terminated) or None."""
    response = self.dialog.run()
    self.dialog.hide()
    if response != Gtk.ResponseType.OK:
        return None
    return self.entry.get_text() + "\n"
2,
2,
2,
1,
1434186057
] |
def attrs_to_dict(attrs):
    """
    Converts a minidom NamedNodeMap that represents the attributes
    of a node into a dictionary. The keys are attribute names.
    The values are the attributes' string values.
    """
    # Dict comprehension replaces the legacy dict([...]) list round-trip.
    return {str(attr.name): attr.value for attr in attrs.values()}
5,
1,
5,
1,
1368367354
] |
def get_single_element_by_tag_name(node, tag_name, optional=False):
"""
Returns an element that is a child of the given node and that
has the tag name given. This method is used where it is assumed
that one such tag exists.
If there is none, an exception is
raised. If there is more than one, the first is returned. | markgw/jazzparser | [
5,
1,
5,
1,
1368367354
] |
def require_attrs(node, attrs):
"""
Checks for the existence of the named attributes on the given
node and raises an exception if they're not there. | markgw/jazzparser | [
5,
1,
5,
1,
1368367354
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.