| column | type | value range |
|---|---|---|
| body | string | 26 – 98.2k chars |
| body_hash | int64 | -9,222,864,604,528,158,000 – 9,221,803,474B |
| docstring | string | 1 – 16.8k chars |
| path | string | 5 – 230 chars |
| name | string | 1 – 96 chars |
| repository_name | string | 7 – 89 chars |
| lang | string | 1 class ("python") |
| body_without_docstring | string | 20 – 98.2k chars |

Each record below lists these fields in that order, with `|` lines separating fields and records.
@commands.bot_has_permissions(manage_roles=True, send_messages=True)
@commands.before_invoke(record_usage)
@cog_ext.cog_slash(name='mute', description='Mutes a member in the server', guild_ids=[settings.get_value('guild_id')], options=[create_option(name='member', description='The member that will be muted', option_type=6, required=True), create_option(name='reason', description='The reason why the member is being muted', option_type=3, required=False), create_option(name='duration', description='The length of time the user will be muted for', option_type=3, required=False)], default_permission=False, permissions={settings.get_value('guild_id'): [create_permission(settings.get_value('role_staff'), SlashCommandPermissionType.ROLE, True), create_permission(settings.get_value('role_trial_mod'), SlashCommandPermissionType.ROLE, True)]})
async def mute(self, ctx: SlashContext, member: discord.Member, duration: str=None, reason: str=None):
' Mutes member in guild. '
(await ctx.defer())
if (not isinstance(member, discord.Member)):
(await embeds.error_message(ctx=ctx, description=f'That user is not in the server.'))
return
if (not (await can_action_member(bot=self.bot, ctx=ctx, member=member))):
(await embeds.error_message(ctx=ctx, description=f'You cannot action {member.mention}.'))
return
if (await self.is_user_muted(ctx=ctx, member=member)):
(await embeds.error_message(ctx=ctx, description=f'{member.mention} is already muted.'))
return
if (not reason):
reason = 'No reason provided.'
elif (len(reason) > 512):
(await embeds.error_message(ctx=ctx, description='Reason must be less than 512 characters.'))
return
if (not duration):
embed = embeds.make_embed(ctx=ctx, title=f'Muting member: {member.name}', description=f'{member.mention} was muted by {ctx.author.mention} for: {reason}', thumbnail_url='https://i.imgur.com/rHtYWIt.png', color='soft_red')
channel = (await self.create_mute_channel(ctx=ctx, member=member, reason=reason))
if (not (await self.send_muted_dm_embed(ctx=ctx, member=member, channel=channel, reason=reason))):
embed.add_field(name='Notice:', value=f'Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.')
(await self.mute_member(ctx=ctx, member=member, reason=reason))
(await ctx.send(embed=embed))
return
(duration_string, mute_end_time) = utils.duration.get_duration(duration=duration)
if (not duration_string):
(await embeds.error_message(ctx=ctx, description=f'''Duration syntax: `#d#h#m#s` (day, hour, min, sec)
You can specify up to all four but you only need one.'''))
return
embed = embeds.make_embed(ctx=ctx, title=f'Muting member: {member}', thumbnail_url='https://i.imgur.com/rHtYWIt.png', color='soft_red')
embed.description = f'{member.mention} was muted by {ctx.author.mention} for: {reason}'
embed.add_field(name='Duration:', value=duration_string, inline=False)
channel = (await self.create_mute_channel(ctx=ctx, member=member, reason=reason, duration=duration_string))
if (not (await self.send_muted_dm_embed(ctx=ctx, member=member, channel=channel, reason=reason, duration=duration_string))):
embed.add_field(name='Notice:', value=f'Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.')
(await self.mute_member(ctx=ctx, member=member, reason=reason, temporary=True, end_time=mute_end_time.timestamp()))
(await ctx.send(embed=embed))
| 4,410,055,446,617,635,300
|
Mutes member in guild.
|
cogs/commands/moderation/mutes.py
|
mute
|
y0usef-2E/chiya
|
python
|
@commands.bot_has_permissions(manage_roles=True, send_messages=True)
@commands.before_invoke(record_usage)
@cog_ext.cog_slash(name='mute', description='Mutes a member in the server', guild_ids=[settings.get_value('guild_id')], options=[create_option(name='member', description='The member that will be muted', option_type=6, required=True), create_option(name='reason', description='The reason why the member is being muted', option_type=3, required=False), create_option(name='duration', description='The length of time the user will be muted for', option_type=3, required=False)], default_permission=False, permissions={settings.get_value('guild_id'): [create_permission(settings.get_value('role_staff'), SlashCommandPermissionType.ROLE, True), create_permission(settings.get_value('role_trial_mod'), SlashCommandPermissionType.ROLE, True)]})
async def mute(self, ctx: SlashContext, member: discord.Member, duration: str=None, reason: str=None):
' '
(await ctx.defer())
if (not isinstance(member, discord.Member)):
(await embeds.error_message(ctx=ctx, description=f'That user is not in the server.'))
return
if (not (await can_action_member(bot=self.bot, ctx=ctx, member=member))):
(await embeds.error_message(ctx=ctx, description=f'You cannot action {member.mention}.'))
return
if (await self.is_user_muted(ctx=ctx, member=member)):
(await embeds.error_message(ctx=ctx, description=f'{member.mention} is already muted.'))
return
if (not reason):
reason = 'No reason provided.'
elif (len(reason) > 512):
(await embeds.error_message(ctx=ctx, description='Reason must be less than 512 characters.'))
return
if (not duration):
embed = embeds.make_embed(ctx=ctx, title=f'Muting member: {member.name}', description=f'{member.mention} was muted by {ctx.author.mention} for: {reason}', thumbnail_url='https://i.imgur.com/rHtYWIt.png', color='soft_red')
channel = (await self.create_mute_channel(ctx=ctx, member=member, reason=reason))
if (not (await self.send_muted_dm_embed(ctx=ctx, member=member, channel=channel, reason=reason))):
embed.add_field(name='Notice:', value=f'Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.')
(await self.mute_member(ctx=ctx, member=member, reason=reason))
(await ctx.send(embed=embed))
return
(duration_string, mute_end_time) = utils.duration.get_duration(duration=duration)
if (not duration_string):
(await embeds.error_message(ctx=ctx, description=f'''Duration syntax: `#d#h#m#s` (day, hour, min, sec)
You can specify up to all four but you only need one.'''))
return
embed = embeds.make_embed(ctx=ctx, title=f'Muting member: {member}', thumbnail_url='https://i.imgur.com/rHtYWIt.png', color='soft_red')
embed.description = f'{member.mention} was muted by {ctx.author.mention} for: {reason}'
embed.add_field(name='Duration:', value=duration_string, inline=False)
channel = (await self.create_mute_channel(ctx=ctx, member=member, reason=reason, duration=duration_string))
if (not (await self.send_muted_dm_embed(ctx=ctx, member=member, channel=channel, reason=reason, duration=duration_string))):
embed.add_field(name='Notice:', value=f'Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.')
(await self.mute_member(ctx=ctx, member=member, reason=reason, temporary=True, end_time=mute_end_time.timestamp()))
(await ctx.send(embed=embed))
|
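The `#d#h#m#s` syntax that `utils.duration.get_duration` parses is not included in this record; below is a minimal sketch of such a parser, assuming it returns a human-readable string plus a timezone-aware end `datetime` as the call sites above imply (the helper name and return shape come from those call sites, the rest is hypothetical):

```python
import re
from datetime import datetime, timedelta, timezone

def get_duration(duration: str):
    """Parse a '#d#h#m#s' string into (human-readable string, end datetime).

    Returns (None, None) when the string does not match the syntax.
    """
    match = re.fullmatch(
        r'(?:(?P<days>\d+)d)?(?:(?P<hours>\d+)h)?'
        r'(?:(?P<minutes>\d+)m)?(?:(?P<seconds>\d+)s)?',
        duration,
    )
    if not match or not any(match.groupdict().values()):
        return None, None
    parts = {k: int(v) for k, v in match.groupdict().items() if v}
    duration_string = ' '.join(f'{v} {k}' for k, v in parts.items())
    return duration_string, datetime.now(tz=timezone.utc) + timedelta(**parts)
```

The `None` first element is what the command above checks with `if (not duration_string):` before rejecting the input.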
@commands.bot_has_permissions(manage_roles=True, send_messages=True)
@commands.before_invoke(record_usage)
@cog_ext.cog_slash(name='unmute', description='Unmutes a member in the server', guild_ids=[settings.get_value('guild_id')], options=[create_option(name='member', description='The member that will be unmuted', option_type=6, required=True), create_option(name='reason', description='The reason why the member is being unmuted', option_type=3, required=False)], default_permission=False, permissions={settings.get_value('guild_id'): [create_permission(settings.get_value('role_staff'), SlashCommandPermissionType.ROLE, True), create_permission(settings.get_value('role_trial_mod'), SlashCommandPermissionType.ROLE, True)]})
async def unmute(self, ctx: SlashContext, member: discord.Member, reason: str=None):
' Unmutes member in guild. '
(await ctx.defer())
if (not isinstance(member, discord.Member)):
(await embeds.error_message(ctx=ctx, description=f'That user is not in the server.'))
return
if (not (await can_action_member(bot=self.bot, ctx=ctx, member=member))):
(await embeds.error_message(ctx=ctx, description=f'You cannot action {member.mention}.'))
return
if (not (await self.is_user_muted(ctx=ctx, member=member))):
(await embeds.error_message(ctx=ctx, description=f'{member.mention} is not muted.'))
return
if (not reason):
reason = 'No reason provided.'
elif (len(reason) > 512):
(await embeds.error_message(ctx=ctx, description='Reason must be less than 512 characters.'))
return
embed = embeds.make_embed(ctx=ctx, title=f'Unmuting member: {member.name}', color='soft_green', thumbnail_url='https://i.imgur.com/W7DpUHC.png')
embed.description = f'{member.mention} was unmuted by {ctx.author.mention} for: {reason}'
(await self.unmute_member(ctx=ctx, member=member, reason=reason))
(await self.archive_mute_channel(ctx=ctx, user_id=member.id, reason=reason))
if (not (await self.send_unmuted_dm_embed(ctx=ctx, member=member, reason=reason))):
embed.add_field(name='Notice:', value=f'Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.')
try:
(await ctx.send(embed=embed))
except discord.HTTPException:
pass
| 8,224,397,948,412,179,000
|
Unmutes member in guild.
|
cogs/commands/moderation/mutes.py
|
unmute
|
y0usef-2E/chiya
|
python
|
@commands.bot_has_permissions(manage_roles=True, send_messages=True)
@commands.before_invoke(record_usage)
@cog_ext.cog_slash(name='unmute', description='Unmutes a member in the server', guild_ids=[settings.get_value('guild_id')], options=[create_option(name='member', description='The member that will be unmuted', option_type=6, required=True), create_option(name='reason', description='The reason why the member is being unmuted', option_type=3, required=False)], default_permission=False, permissions={settings.get_value('guild_id'): [create_permission(settings.get_value('role_staff'), SlashCommandPermissionType.ROLE, True), create_permission(settings.get_value('role_trial_mod'), SlashCommandPermissionType.ROLE, True)]})
async def unmute(self, ctx: SlashContext, member: discord.Member, reason: str=None):
' '
(await ctx.defer())
if (not isinstance(member, discord.Member)):
(await embeds.error_message(ctx=ctx, description=f'That user is not in the server.'))
return
if (not (await can_action_member(bot=self.bot, ctx=ctx, member=member))):
(await embeds.error_message(ctx=ctx, description=f'You cannot action {member.mention}.'))
return
if (not (await self.is_user_muted(ctx=ctx, member=member))):
(await embeds.error_message(ctx=ctx, description=f'{member.mention} is not muted.'))
return
if (not reason):
reason = 'No reason provided.'
elif (len(reason) > 512):
(await embeds.error_message(ctx=ctx, description='Reason must be less than 512 characters.'))
return
embed = embeds.make_embed(ctx=ctx, title=f'Unmuting member: {member.name}', color='soft_green', thumbnail_url='https://i.imgur.com/W7DpUHC.png')
embed.description = f'{member.mention} was unmuted by {ctx.author.mention} for: {reason}'
(await self.unmute_member(ctx=ctx, member=member, reason=reason))
(await self.archive_mute_channel(ctx=ctx, user_id=member.id, reason=reason))
if (not (await self.send_unmuted_dm_embed(ctx=ctx, member=member, reason=reason))):
embed.add_field(name='Notice:', value=f'Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.')
try:
(await ctx.send(embed=embed))
except discord.HTTPException:
pass
|
def _get_check_for_user(request, code):
' Return specified check if current user has access to it. '
assert request.user.is_authenticated
check = get_object_or_404(Check.objects.select_related('project'), code=code)
if request.user.is_superuser:
return (check, True)
if (request.user.id == check.project.owner_id):
return (check, True)
membership = get_object_or_404(Member, project=check.project, user=request.user)
return (check, membership.rw)
| -7,245,660,821,251,507,000
|
Return specified check if current user has access to it.
|
hc/front/views.py
|
_get_check_for_user
|
srvz/healthchecks
|
python
|
def _get_check_for_user(request, code):
' '
assert request.user.is_authenticated
check = get_object_or_404(Check.objects.select_related('project'), code=code)
if request.user.is_superuser:
return (check, True)
if (request.user.id == check.project.owner_id):
return (check, True)
membership = get_object_or_404(Member, project=check.project, user=request.user)
return (check, membership.rw)
|
def _get_channel_for_user(request, code):
' Return specified channel if current user has access to it. '
assert request.user.is_authenticated
channel = get_object_or_404(Channel.objects.select_related('project'), code=code)
if request.user.is_superuser:
return (channel, True)
if (request.user.id == channel.project.owner_id):
return (channel, True)
membership = get_object_or_404(Member, project=channel.project, user=request.user)
return (channel, membership.rw)
| 4,297,122,973,497,515,000
|
Return specified channel if current user has access to it.
|
hc/front/views.py
|
_get_channel_for_user
|
srvz/healthchecks
|
python
|
def _get_channel_for_user(request, code):
' '
assert request.user.is_authenticated
channel = get_object_or_404(Channel.objects.select_related('project'), code=code)
if request.user.is_superuser:
return (channel, True)
if (request.user.id == channel.project.owner_id):
return (channel, True)
membership = get_object_or_404(Member, project=channel.project, user=request.user)
return (channel, membership.rw)
|
def _get_project_for_user(request, project_code):
' Check access, return (project, rw) tuple. '
project = get_object_or_404(Project, code=project_code)
if request.user.is_superuser:
return (project, True)
if (request.user.id == project.owner_id):
return (project, True)
membership = get_object_or_404(Member, project=project, user=request.user)
return (project, membership.rw)
| 4,360,222,302,387,280,000
|
Check access, return (project, rw) tuple.
|
hc/front/views.py
|
_get_project_for_user
|
srvz/healthchecks
|
python
|
def _get_project_for_user(request, project_code):
' '
project = get_object_or_404(Project, code=project_code)
if request.user.is_superuser:
return (project, True)
if (request.user.id == project.owner_id):
return (project, True)
membership = get_object_or_404(Member, project=project, user=request.user)
return (project, membership.rw)
|
def _get_rw_project_for_user(request, project_code):
' Check for read-write access and return the project; raise PermissionDenied otherwise. '
(project, rw) = _get_project_for_user(request, project_code)
if (not rw):
raise PermissionDenied
return project
| -6,583,339,608,857,261,000
|
Check for read-write access and return the project; raise PermissionDenied otherwise.
|
hc/front/views.py
|
_get_rw_project_for_user
|
srvz/healthchecks
|
python
|
def _get_rw_project_for_user(request, project_code):
' '
(project, rw) = _get_project_for_user(request, project_code)
if (not rw):
raise PermissionDenied
return project
|
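The four helpers above share one access ladder: superuser, then project owner, then a `Member` lookup that 404s for non-members, with `rw` flagging write access. A minimal sketch of how a view in the same module might consume them, assuming standard Django imports (the view name and template are hypothetical):

```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import render

@login_required
def check_details(request, code):
    # 404s unless the user is a superuser, the project owner, or a member.
    check, rw = _get_check_for_user(request, code)
    # rw gates mutating actions; read-only members still see the page.
    return render(request, "front/check_details.html", {"check": check, "rw": rw})
```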
def _refresh_last_active_date(profile):
' Update last_active_date if it is more than a day old. '
now = timezone.now()
if ((profile.last_active_date is None) or ((now - profile.last_active_date).days > 0)):
profile.last_active_date = now
profile.save()
| -7,346,630,996,628,235,000
|
Update last_active_date if it is more than a day old.
|
hc/front/views.py
|
_refresh_last_active_date
|
srvz/healthchecks
|
python
|
def _refresh_last_active_date(profile):
' '
now = timezone.now()
if ((profile.last_active_date is None) or ((now - profile.last_active_date).days > 0)):
profile.last_active_date = now
profile.save()
|
def compute_average_surface_distance(seg_pred: Union[(np.ndarray, torch.Tensor)], seg_gt: Union[(np.ndarray, torch.Tensor)], label_idx: int, symmetric: bool=False, distance_metric: str='euclidean'):
'\n This function is used to compute the Average Surface Distance from `seg_pred` to `seg_gt`\n under the default setting.\n In addition, if sets ``symmetric = True``, the average symmetric surface distance between\n these two inputs will be returned.\n\n Args:\n seg_pred: first binary or labelfield image.\n seg_gt: second binary or labelfield image.\n label_idx: for labelfield images, convert to binary with\n `seg_pred = seg_pred == label_idx`.\n symmetric: if calculate the symmetric average surface distance between\n `seg_pred` and `seg_gt`. Defaults to ``False``.\n distance_metric: : [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]\n the metric used to compute surface distance. Defaults to ``"euclidean"``.\n '
(edges_pred, edges_gt) = get_mask_edges(seg_pred, seg_gt, label_idx)
surface_distance = get_surface_distance(edges_pred, edges_gt, label_idx, distance_metric=distance_metric)
if (surface_distance.shape == (0,)):
return np.inf
avg_surface_distance = surface_distance.mean()
if (not symmetric):
return avg_surface_distance
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, label_idx, distance_metric=distance_metric)
if (surface_distance_2.shape == (0,)):
return np.inf
avg_surface_distance_2 = surface_distance_2.mean()
return np.mean((avg_surface_distance, avg_surface_distance_2))
| 4,632,578,815,613,066,000
|
This function computes the Average Surface Distance from `seg_pred` to `seg_gt`
under the default setting.
If ``symmetric = True`` is set, the average symmetric surface distance between
these two inputs is returned instead.
Args:
seg_pred: first binary or labelfield image.
seg_gt: second binary or labelfield image.
label_idx: for labelfield images, convert to binary with
`seg_pred = seg_pred == label_idx`.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
distance_metric: one of [``"euclidean"``, ``"chessboard"``, ``"taxicab"``];
the metric used to compute surface distance. Defaults to ``"euclidean"``.
|
monai/metrics/surface_distance.py
|
compute_average_surface_distance
|
Alxaline/MONAI
|
python
|
def compute_average_surface_distance(seg_pred: Union[(np.ndarray, torch.Tensor)], seg_gt: Union[(np.ndarray, torch.Tensor)], label_idx: int, symmetric: bool=False, distance_metric: str='euclidean'):
'\n This function is used to compute the Average Surface Distance from `seg_pred` to `seg_gt`\n under the default setting.\n In addition, if sets ``symmetric = True``, the average symmetric surface distance between\n these two inputs will be returned.\n\n Args:\n seg_pred: first binary or labelfield image.\n seg_gt: second binary or labelfield image.\n label_idx: for labelfield images, convert to binary with\n `seg_pred = seg_pred == label_idx`.\n symmetric: if calculate the symmetric average surface distance between\n `seg_pred` and `seg_gt`. Defaults to ``False``.\n distance_metric: : [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]\n the metric used to compute surface distance. Defaults to ``"euclidean"``.\n '
(edges_pred, edges_gt) = get_mask_edges(seg_pred, seg_gt, label_idx)
surface_distance = get_surface_distance(edges_pred, edges_gt, label_idx, distance_metric=distance_metric)
if (surface_distance.shape == (0,)):
return np.inf
avg_surface_distance = surface_distance.mean()
if (not symmetric):
return avg_surface_distance
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, label_idx, distance_metric=distance_metric)
if (surface_distance_2.shape == (0,)):
return np.inf
avg_surface_distance_2 = surface_distance_2.mean()
return np.mean((avg_surface_distance, avg_surface_distance_2))
|
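A short usage sketch for the metric above, assuming the module path shown in this record is importable; the toy masks are made up for illustration:

```python
import numpy as np
from monai.metrics.surface_distance import compute_average_surface_distance

# Two toy 2-D label fields that disagree along one edge column.
pred = np.zeros((8, 8), dtype=np.int64)
gt = np.zeros((8, 8), dtype=np.int64)
pred[2:6, 2:6] = 1
gt[2:6, 2:7] = 1

asd = compute_average_surface_distance(pred, gt, label_idx=1)
sym = compute_average_surface_distance(pred, gt, label_idx=1, symmetric=True)
print(asd, sym)  # the symmetric variant averages pred->gt and gt->pred
```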
def main():
'"options for criterion is wasserstien, h_divergence'
itertn = 1
c3_value = 0.5
for trial in range(1):
args = {'img_size': 28, 'chnnl': 1, 'lr': 0.01, 'momentum': 0.9, 'epochs': 1, 'tr_smpl': 1000, 'test_smpl': 10000, 'tsk_list': ['mnist', 'svhn', 'm_mnist'], 'grad_weight': 1, 'Trials': trial, 'criterion': 'wasserstien', 'c3': c3_value}
ft_extrctor_prp = {'layer1': {'conv': [1, 32, 5, 1, 2], 'elu': [], 'maxpool': [3, 2, 0]}, 'layer2': {'conv': [32, 64, 5, 1, 2], 'elu': [], 'maxpool': [3, 2, 0]}}
hypoth_prp = {'layer3': {'fc': [util.in_feature_size(ft_extrctor_prp, args['img_size']), 128], 'act_fn': 'elu'}, 'layer4': {'fc': [128, 10], 'act_fn': 'softmax'}}
discrm_prp = {'reverse_gradient': {}, 'layer3': {'fc': [util.in_feature_size(ft_extrctor_prp, args['img_size']), 128], 'act_fn': 'elu'}, 'layer4': {'fc': [128, 1], 'act_fn': 'sigm'}}
mtl = MTL_pairwise(ft_extrctor_prp, hypoth_prp, discrm_prp, **args)
del mtl
| 6,303,770,635,771,688,000
|
"options for criterion is wasserstien, h_divergence
|
MTL.py
|
main
|
cjshui/AMTNN
|
python
|
def main():
itertn = 1
c3_value = 0.5
for trial in range(1):
args = {'img_size': 28, 'chnnl': 1, 'lr': 0.01, 'momentum': 0.9, 'epochs': 1, 'tr_smpl': 1000, 'test_smpl': 10000, 'tsk_list': ['mnist', 'svhn', 'm_mnist'], 'grad_weight': 1, 'Trials': trial, 'criterion': 'wasserstien', 'c3': c3_value}
ft_extrctor_prp = {'layer1': {'conv': [1, 32, 5, 1, 2], 'elu': [], 'maxpool': [3, 2, 0]}, 'layer2': {'conv': [32, 64, 5, 1, 2], 'elu': [], 'maxpool': [3, 2, 0]}}
hypoth_prp = {'layer3': {'fc': [util.in_feature_size(ft_extrctor_prp, args['img_size']), 128], 'act_fn': 'elu'}, 'layer4': {'fc': [128, 10], 'act_fn': 'softmax'}}
discrm_prp = {'reverse_gradient': {}, 'layer3': {'fc': [util.in_feature_size(ft_extrctor_prp, args['img_size']), 128], 'act_fn': 'elu'}, 'layer4': {'fc': [128, 1], 'act_fn': 'sigm'}}
mtl = MTL_pairwise(ft_extrctor_prp, hypoth_prp, discrm_prp, **args)
del mtl
|
def reflection(image, axis=0):
'\n 8x8のブロックごとに離散コサイン変換された画像(以下DCT画像)を鏡像変換する.\n\n Parameters\n ----------\n image:幅と高さが8の倍数である画像を表す2次元配列. 8の倍数でない場合の動作は未定義.\n \n axis:変換する軸. defalutは`axis=0`\n\n Returns\n -------\n `image`を鏡像変換したDCT画像を表す2次元配列を返す. `image`の値は変わらない.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.arange(64).reshape((8,8))\n >>> a\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 61, 62, 63]])\n >>> dct_image_transform.reflection.reflection(a,axis=0)\n array([[ 5.77395663e-15, 1.00000000e+00, 2.00000000e+00,\n 3.00000000e+00, 4.00000000e+00, 5.00000000e+00,\n 6.00000000e+00, 7.00000000e+00],\n [-8.00000000e+00, -9.00000000e+00, -1.00000000e+01,\n -1.10000000e+01, -1.20000000e+01, -1.30000000e+01,\n -1.40000000e+01, -1.50000000e+01],\n [ 1.60000000e+01, 1.70000000e+01, 1.80000000e+01,\n 1.90000000e+01, 2.00000000e+01, 2.10000000e+01,\n 2.20000000e+01, 2.30000000e+01],\n [-2.40000000e+01, -2.50000000e+01, -2.60000000e+01,\n -2.70000000e+01, -2.80000000e+01, -2.90000000e+01,\n -3.00000000e+01, -3.10000000e+01],\n [ 3.20000000e+01, 3.30000000e+01, 3.40000000e+01,\n 3.50000000e+01, 3.60000000e+01, 3.70000000e+01,\n 3.80000000e+01, 3.90000000e+01],\n [-4.00000000e+01, -4.10000000e+01, -4.20000000e+01,\n -4.30000000e+01, -4.40000000e+01, -4.50000000e+01,\n -4.60000000e+01, -4.70000000e+01],\n [ 4.80000000e+01, 4.90000000e+01, 5.00000000e+01,\n 5.10000000e+01, 5.20000000e+01, 5.30000000e+01,\n 5.40000000e+01, 5.50000000e+01],\n [-5.60000000e+01, -5.70000000e+01, -5.80000000e+01,\n -5.90000000e+01, -6.00000000e+01, -6.10000000e+01,\n -6.20000000e+01, -6.30000000e+01]])\n '
R = np.zeros((8, 8), dtype=np.float64)
for i in range(8):
R[(i, (7 - i))] = 1
R = dct2(R)
if (axis == 0):
return np.vstack(list(map((lambda m: np.dot(R, m)), np.flip(np.vsplit(image, range(8, image.shape[1], 8)), 0))))
elif (axis == 1):
return np.hstack(list(map((lambda m: np.dot(m, R)), np.flip(np.hsplit(image, range(8, image.shape[1], 8)), 0))))
| -5,929,522,793,636,503,000
|
Mirror-transforms an image that has been discrete-cosine-transformed in 8x8 blocks (a "DCT image" below).
Parameters
----------
image: 2-D array representing an image whose width and height are multiples of 8. Behaviour is undefined when they are not multiples of 8.
axis: the axis along which to mirror. Default is `axis=0`.
Returns
-------
A 2-D array representing the mirrored DCT image. `image` itself is not modified.
Examples
--------
>>> import numpy as np
>>> a = np.arange(64).reshape((8,8))
>>> a
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55],
[56, 57, 58, 59, 60, 61, 62, 63]])
>>> dct_image_transform.reflection.reflection(a,axis=0)
array([[ 5.77395663e-15, 1.00000000e+00, 2.00000000e+00,
3.00000000e+00, 4.00000000e+00, 5.00000000e+00,
6.00000000e+00, 7.00000000e+00],
[-8.00000000e+00, -9.00000000e+00, -1.00000000e+01,
-1.10000000e+01, -1.20000000e+01, -1.30000000e+01,
-1.40000000e+01, -1.50000000e+01],
[ 1.60000000e+01, 1.70000000e+01, 1.80000000e+01,
1.90000000e+01, 2.00000000e+01, 2.10000000e+01,
2.20000000e+01, 2.30000000e+01],
[-2.40000000e+01, -2.50000000e+01, -2.60000000e+01,
-2.70000000e+01, -2.80000000e+01, -2.90000000e+01,
-3.00000000e+01, -3.10000000e+01],
[ 3.20000000e+01, 3.30000000e+01, 3.40000000e+01,
3.50000000e+01, 3.60000000e+01, 3.70000000e+01,
3.80000000e+01, 3.90000000e+01],
[-4.00000000e+01, -4.10000000e+01, -4.20000000e+01,
-4.30000000e+01, -4.40000000e+01, -4.50000000e+01,
-4.60000000e+01, -4.70000000e+01],
[ 4.80000000e+01, 4.90000000e+01, 5.00000000e+01,
5.10000000e+01, 5.20000000e+01, 5.30000000e+01,
5.40000000e+01, 5.50000000e+01],
[-5.60000000e+01, -5.70000000e+01, -5.80000000e+01,
-5.90000000e+01, -6.00000000e+01, -6.10000000e+01,
-6.20000000e+01, -6.30000000e+01]])
|
dct_image_transform/reflection.py
|
reflection
|
kanpurin/dctimagetransform
|
python
|
def reflection(image, axis=0):
'\n 8x8のブロックごとに離散コサイン変換された画像(以下DCT画像)を鏡像変換する.\n\n Parameters\n ----------\n image:幅と高さが8の倍数である画像を表す2次元配列. 8の倍数でない場合の動作は未定義.\n \n axis:変換する軸. defalutは`axis=0`\n\n Returns\n -------\n `image`を鏡像変換したDCT画像を表す2次元配列を返す. `image`の値は変わらない.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.arange(64).reshape((8,8))\n >>> a\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 61, 62, 63]])\n >>> dct_image_transform.reflection.reflection(a,axis=0)\n array([[ 5.77395663e-15, 1.00000000e+00, 2.00000000e+00,\n 3.00000000e+00, 4.00000000e+00, 5.00000000e+00,\n 6.00000000e+00, 7.00000000e+00],\n [-8.00000000e+00, -9.00000000e+00, -1.00000000e+01,\n -1.10000000e+01, -1.20000000e+01, -1.30000000e+01,\n -1.40000000e+01, -1.50000000e+01],\n [ 1.60000000e+01, 1.70000000e+01, 1.80000000e+01,\n 1.90000000e+01, 2.00000000e+01, 2.10000000e+01,\n 2.20000000e+01, 2.30000000e+01],\n [-2.40000000e+01, -2.50000000e+01, -2.60000000e+01,\n -2.70000000e+01, -2.80000000e+01, -2.90000000e+01,\n -3.00000000e+01, -3.10000000e+01],\n [ 3.20000000e+01, 3.30000000e+01, 3.40000000e+01,\n 3.50000000e+01, 3.60000000e+01, 3.70000000e+01,\n 3.80000000e+01, 3.90000000e+01],\n [-4.00000000e+01, -4.10000000e+01, -4.20000000e+01,\n -4.30000000e+01, -4.40000000e+01, -4.50000000e+01,\n -4.60000000e+01, -4.70000000e+01],\n [ 4.80000000e+01, 4.90000000e+01, 5.00000000e+01,\n 5.10000000e+01, 5.20000000e+01, 5.30000000e+01,\n 5.40000000e+01, 5.50000000e+01],\n [-5.60000000e+01, -5.70000000e+01, -5.80000000e+01,\n -5.90000000e+01, -6.00000000e+01, -6.10000000e+01,\n -6.20000000e+01, -6.30000000e+01]])\n '
R = np.zeros((8, 8), dtype=np.float64)
for i in range(8):
R[(i, (7 - i))] = 1
R = dct2(R)
if (axis == 0):
return np.vstack(list(map((lambda m: np.dot(R, m)), np.flip(np.vsplit(image, range(8, image.shape[1], 8)), 0))))
elif (axis == 1):
return np.hstack(list(map((lambda m: np.dot(m, R)), np.flip(np.hsplit(image, range(8, image.shape[1], 8)), 0))))
|
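`reflection` depends on a `dct2` helper that is not part of this record; below is a common definition, assuming an orthonormal 2-D type-II DCT built from `scipy.fftpack.dct` (the exact normalization the repository uses is an assumption):

```python
import numpy as np
from scipy.fftpack import dct
from dct_image_transform.reflection import reflection

def dct2(block):
    # 2-D type-II DCT with orthonormal scaling, applied along both axes.
    return dct(dct(block.T, norm='ortho').T, norm='ortho')

a = np.arange(64, dtype=np.float64).reshape((8, 8))
mirrored = reflection(dct2(a), axis=0)  # mirror the 8x8 block in the DCT domain
```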
def __init__(self, x=0, y=0):
'Point constructor.'
self.x = x
self.y = y
| -1,485,226,074,151,816,200
|
Point constructor.
|
zadanka/l5zad4.py
|
__init__
|
wrutkowski1000/wizualizacja-danych
|
python
|
def __init__(self, x=0, y=0):
self.x = x
self.y = y
|
def get_index(dataset: Dataset, loader: Loader[(Dataset, Entity)]) -> Index[(Dataset, Entity)]:
'Load the search index for the given dataset or generate one if it does\n not exist.'
path = get_index_path(dataset)
index = Index.load(loader, path)
return index
| 350,794,033,161,731,000
|
Load the search index for the given dataset or generate one if it does
not exist.
|
opensanctions/core/index.py
|
get_index
|
alephdata/opensanctions
|
python
|
def get_index(dataset: Dataset, loader: Loader[(Dataset, Entity)]) -> Index[(Dataset, Entity)]:
'Load the search index for the given dataset or generate one if it does\n not exist.'
path = get_index_path(dataset)
index = Index.load(loader, path)
return index
|
def __init__(self, downloader=None):
'Constructor. Receives an optional downloader.'
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
| 7,054,030,604,609,068,000
|
Constructor. Receives an optional downloader.
|
youtube_dl/extractor/common.py
|
__init__
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def __init__(self, downloader=None):
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
|
@classmethod
def suitable(cls, url):
'Receives a URL and returns True if suitable for this IE.'
if ('_VALID_URL_RE' not in cls.__dict__):
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return (cls._VALID_URL_RE.match(url) is not None)
| -4,011,644,621,854,200,000
|
Receives a URL and returns True if suitable for this IE.
|
youtube_dl/extractor/common.py
|
suitable
|
DevSecOpsGuy/youtube-dl-1
|
python
|
@classmethod
def suitable(cls, url):
if ('_VALID_URL_RE' not in cls.__dict__):
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return (cls._VALID_URL_RE.match(url) is not None)
|
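The `'_VALID_URL_RE' not in cls.__dict__` check above compiles `_VALID_URL` once per class rather than once per call, and deliberately avoids `hasattr`, which would find a parent class's cached regex. A minimal standalone sketch of the difference (class names are hypothetical):

```python
import re

class BaseIE:
    _VALID_URL = r'https?://base\.example/.*'

class ChildIE(BaseIE):
    _VALID_URL = r'https?://child\.example/.*'

# Once BaseIE caches its compiled pattern...
BaseIE._VALID_URL_RE = re.compile(BaseIE._VALID_URL)

# ...hasattr would wrongly reuse it for the subclass:
print(hasattr(ChildIE, '_VALID_URL_RE'))    # True (inherited from BaseIE)
print('_VALID_URL_RE' in ChildIE.__dict__)  # False (forces a fresh compile)
```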
@classmethod
def working(cls):
'Getter method for _WORKING.'
return cls._WORKING
| 2,406,935,002,155,684,400
|
Getter method for _WORKING.
|
youtube_dl/extractor/common.py
|
working
|
DevSecOpsGuy/youtube-dl-1
|
python
|
@classmethod
def working(cls):
return cls._WORKING
|
def initialize(self):
'Initializes an instance (authentication, etc).'
self._initialize_geo_bypass({'countries': self._GEO_COUNTRIES, 'ip_blocks': self._GEO_IP_BLOCKS})
if (not self._ready):
self._real_initialize()
self._ready = True
| -4,230,263,112,828,807,000
|
Initializes an instance (authentication, etc).
|
youtube_dl/extractor/common.py
|
initialize
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def initialize(self):
self._initialize_geo_bypass({'countries': self._GEO_COUNTRIES, 'ip_blocks': self._GEO_IP_BLOCKS})
if (not self._ready):
self._real_initialize()
self._ready = True
|
def _initialize_geo_bypass(self, geo_bypass_context):
"\n Initialize geo restriction bypass mechanism.\n\n This method is used to initialize geo bypass mechanism based on faking\n X-Forwarded-For HTTP header. A random country from provided country list\n is selected and a random IP belonging to this country is generated. This\n IP will be passed as X-Forwarded-For HTTP header in all subsequent\n HTTP requests.\n\n This method will be used for initial geo bypass mechanism initialization\n during the instance initialization with _GEO_COUNTRIES and\n _GEO_IP_BLOCKS.\n\n You may also manually call it from extractor's code if geo bypass\n information is not available beforehand (e.g. obtained during\n extraction) or due to some other reason. In this case you should pass\n this information in geo bypass context passed as first argument. It may\n contain following fields:\n\n countries: List of geo unrestricted countries (similar\n to _GEO_COUNTRIES)\n ip_blocks: List of geo unrestricted IP blocks in CIDR notation\n (similar to _GEO_IP_BLOCKS)\n\n "
if (not self._x_forwarded_for_ip):
if (not self._downloader.params.get('geo_bypass', True)):
return
if (not geo_bypass_context):
geo_bypass_context = {}
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {'countries': geo_bypass_context}
ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
if (not ip_block):
ip_blocks = geo_bypass_context.get('ip_blocks')
if (self._GEO_BYPASS and ip_blocks):
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(('[debug] Using fake IP %s as X-Forwarded-For.' % self._x_forwarded_for_ip))
return
country = self._downloader.params.get('geo_bypass_country', None)
if (not country):
countries = geo_bypass_context.get('countries')
if (self._GEO_BYPASS and countries):
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(('[debug] Using fake IP %s (%s) as X-Forwarded-For.' % (self._x_forwarded_for_ip, country.upper())))
| -8,957,561,630,360,193,000
|
Initialize geo restriction bypass mechanism.
This method initializes the geo bypass mechanism, which works by faking
the X-Forwarded-For HTTP header. A random country from the provided country list
is selected and a random IP belonging to that country is generated. This
IP is then passed as the X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method is used for the initial geo bypass setup during
instance initialization with _GEO_COUNTRIES and
_GEO_IP_BLOCKS.
You may also call it manually from an extractor's code if geo bypass
information is not available beforehand (e.g. obtained during
extraction) or for some other reason. In that case, pass
this information in the geo bypass context given as the first argument. It may
contain the following fields:
countries: List of geo-unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo-unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
|
youtube_dl/extractor/common.py
|
_initialize_geo_bypass
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _initialize_geo_bypass(self, geo_bypass_context):
"\n Initialize geo restriction bypass mechanism.\n\n This method is used to initialize geo bypass mechanism based on faking\n X-Forwarded-For HTTP header. A random country from provided country list\n is selected and a random IP belonging to this country is generated. This\n IP will be passed as X-Forwarded-For HTTP header in all subsequent\n HTTP requests.\n\n This method will be used for initial geo bypass mechanism initialization\n during the instance initialization with _GEO_COUNTRIES and\n _GEO_IP_BLOCKS.\n\n You may also manually call it from extractor's code if geo bypass\n information is not available beforehand (e.g. obtained during\n extraction) or due to some other reason. In this case you should pass\n this information in geo bypass context passed as first argument. It may\n contain following fields:\n\n countries: List of geo unrestricted countries (similar\n to _GEO_COUNTRIES)\n ip_blocks: List of geo unrestricted IP blocks in CIDR notation\n (similar to _GEO_IP_BLOCKS)\n\n "
if (not self._x_forwarded_for_ip):
if (not self._downloader.params.get('geo_bypass', True)):
return
if (not geo_bypass_context):
geo_bypass_context = {}
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {'countries': geo_bypass_context}
ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
if (not ip_block):
ip_blocks = geo_bypass_context.get('ip_blocks')
if (self._GEO_BYPASS and ip_blocks):
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(('[debug] Using fake IP %s as X-Forwarded-For.' % self._x_forwarded_for_ip))
return
country = self._downloader.params.get('geo_bypass_country', None)
if (not country):
countries = geo_bypass_context.get('countries')
if (self._GEO_BYPASS and countries):
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(('[debug] Using fake IP %s (%s) as X-Forwarded-For.' % (self._x_forwarded_for_ip, country.upper())))
|
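`GeoUtils.random_ipv4` is not included in this record; below is a minimal sketch of drawing a random address from a CIDR block with the standard library, assuming that is roughly what the helper does for the `ip_block` case (the real helper also accepts country codes, which this omits):

```python
import random
from ipaddress import IPv4Address, ip_network

def random_ipv4(ip_block: str) -> str:
    # Pick a uniformly random address inside the block.
    net = ip_network(ip_block)
    offset = random.randint(0, net.num_addresses - 1)
    return str(IPv4Address(int(net.network_address) + offset))

print(random_ipv4('203.0.113.0/24'))  # e.g. '203.0.113.57'
```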
def extract(self, url):
'Extracts URL information and returns it in list of dicts.'
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
| -5,138,944,494,329,492,000
|
Extracts URL information and returns it in list of dicts.
|
youtube_dl/extractor/common.py
|
extract
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def extract(self, url):
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
|
def set_downloader(self, downloader):
'Sets the downloader for this IE.'
self._downloader = downloader
| -6,028,627,441,873,874,000
|
Sets the downloader for this IE.
|
youtube_dl/extractor/common.py
|
set_downloader
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def set_downloader(self, downloader):
self._downloader = downloader
|
def _real_initialize(self):
'Real initialization process. Redefine in subclasses.'
pass
| -1,551,871,763,434,820,600
|
Real initialization process. Redefine in subclasses.
|
youtube_dl/extractor/common.py
|
_real_initialize
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _real_initialize(self):
pass
|
def _real_extract(self, url):
'Real extraction process. Redefine in subclasses.'
pass
| 9,121,875,136,483,058,000
|
Real extraction process. Redefine in subclasses.
|
youtube_dl/extractor/common.py
|
_real_extract
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _real_extract(self, url):
pass
|
@classmethod
def ie_key(cls):
'A string for getting the InfoExtractor with get_info_extractor'
return compat_str(cls.__name__[:(- 2)])
| 5,437,829,511,205,614,000
|
A string for getting the InfoExtractor with get_info_extractor
|
youtube_dl/extractor/common.py
|
ie_key
|
DevSecOpsGuy/youtube-dl-1
|
python
|
@classmethod
def ie_key(cls):
return compat_str(cls.__name__[:(- 2)])
|
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
'\n Return the response handle.\n\n See _download_webpage docstring for arguments specification.\n '
if (note is None):
self.report_download_webpage(video_id)
elif (note is not False):
if (video_id is None):
self.to_screen(('%s' % (note,)))
else:
self.to_screen(('%s: %s' % (video_id, note)))
if self._x_forwarded_for_ip:
if ('X-Forwarded-For' not in headers):
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if ((data is not None) or headers):
url_or_request = sanitized_Request(url_or_request, data, headers)
exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
exceptions.append(ssl.CertificateError)
try:
return self._downloader.urlopen(url_or_request)
except tuple(exceptions) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
err.fp._error = err
return err.fp
if (errnote is False):
return False
if (errnote is None):
errnote = 'Unable to download webpage'
errmsg = ('%s: %s' % (errnote, error_to_compat_str(err)))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
| 4,311,339,888,729,569,300
|
Return the response handle.
See _download_webpage docstring for arguments specification.
|
youtube_dl/extractor/common.py
|
_request_webpage
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
'\n Return the response handle.\n\n See _download_webpage docstring for arguments specification.\n '
if (note is None):
self.report_download_webpage(video_id)
elif (note is not False):
if (video_id is None):
self.to_screen(('%s' % (note,)))
else:
self.to_screen(('%s: %s' % (video_id, note)))
if self._x_forwarded_for_ip:
if ('X-Forwarded-For' not in headers):
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if ((data is not None) or headers):
url_or_request = sanitized_Request(url_or_request, data, headers)
exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
exceptions.append(ssl.CertificateError)
try:
return self._downloader.urlopen(url_or_request)
except tuple(exceptions) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
err.fp._error = err
return err.fp
if (errnote is False):
return False
if (errnote is None):
errnote = 'Unable to download webpage'
errmsg = ('%s: %s' % (errnote, error_to_compat_str(err)))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
|
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return a tuple (page content as string, URL handle).\n\n See _download_webpage docstring for arguments specification.\n '
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if (urlh is False):
assert (not fatal)
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
| 7,614,932,583,454,537,000
|
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
|
youtube_dl/extractor/common.py
|
_download_webpage_handle
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return a tuple (page content as string, URL handle).\n\n See _download_webpage docstring for arguments specification.\n '
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if (urlh is False):
assert (not fatal)
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
|
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return the data of the page as a string.\n\n Arguments:\n url_or_request -- plain text URL as a string or\n a compat_urllib_request.Requestobject\n video_id -- Video/playlist/item identifier (string)\n\n Keyword arguments:\n note -- note printed before downloading (string)\n errnote -- note printed in case of an error (string)\n fatal -- flag denoting whether error should be considered fatal,\n i.e. whether it should cause ExtractionError to be raised,\n otherwise a warning will be reported and extraction continued\n tries -- number of tries\n timeout -- sleep interval between tries\n encoding -- encoding for a page content decoding, guessed automatically\n when not explicitly specified\n data -- POST data (bytes)\n headers -- HTTP headers (dict)\n query -- URL query (dict)\n expected_status -- allows to accept failed HTTP requests (non 2xx\n status code) by explicitly specifying a set of accepted status\n codes. Can be any of the following entities:\n - an integer type specifying an exact failed status code to\n accept\n - a list or a tuple of integer types specifying a list of\n failed status codes to accept\n - a callable accepting an actual failed status code and\n returning True if it should be accepted\n Note that this argument does not affect success status codes (2xx)\n which are always accepted.\n '
success = False
try_count = 0
while (success is False):
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if (try_count >= tries):
raise e
self._sleep(timeout, video_id)
if (res is False):
return res
else:
(content, _) = res
return content
| 8,941,889,573,552,861,000
|
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
a compat_urllib_request.Requestobject
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether the error should be considered fatal,
i.e. whether it should cause ExtractorError to be raised;
otherwise a warning is reported and extraction continues
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
expected_status -- allows accepting failed HTTP requests (non-2xx
status codes) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
|
youtube_dl/extractor/common.py
|
_download_webpage
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return the data of the page as a string.\n\n Arguments:\n url_or_request -- plain text URL as a string or\n a compat_urllib_request.Requestobject\n video_id -- Video/playlist/item identifier (string)\n\n Keyword arguments:\n note -- note printed before downloading (string)\n errnote -- note printed in case of an error (string)\n fatal -- flag denoting whether error should be considered fatal,\n i.e. whether it should cause ExtractionError to be raised,\n otherwise a warning will be reported and extraction continued\n tries -- number of tries\n timeout -- sleep interval between tries\n encoding -- encoding for a page content decoding, guessed automatically\n when not explicitly specified\n data -- POST data (bytes)\n headers -- HTTP headers (dict)\n query -- URL query (dict)\n expected_status -- allows to accept failed HTTP requests (non 2xx\n status code) by explicitly specifying a set of accepted status\n codes. Can be any of the following entities:\n - an integer type specifying an exact failed status code to\n accept\n - a list or a tuple of integer types specifying a list of\n failed status codes to accept\n - a callable accepting an actual failed status code and\n returning True if it should be accepted\n Note that this argument does not affect success status codes (2xx)\n which are always accepted.\n '
success = False
try_count = 0
while (success is False):
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if (try_count >= tries):
raise e
self._sleep(timeout, video_id)
if (res is False):
return res
else:
(content, _) = res
return content
|
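The `expected_status` contract documented above (an int, a list/tuple of ints, or a callable) maps onto a small predicate; below is a sketch of what the private `__can_accept_status_code` check plausibly does, since its body is not shown in this dump:

```python
def can_accept_status_code(status_code, expected_status):
    # expected_status may be None, an int, a list/tuple of ints, or a callable.
    if expected_status is None:
        return False
    if isinstance(expected_status, int):
        return status_code == expected_status
    if isinstance(expected_status, (list, tuple)):
        return status_code in expected_status
    if callable(expected_status):
        return bool(expected_status(status_code))
    raise AssertionError('unsupported expected_status type')

assert can_accept_status_code(404, 404)
assert can_accept_status_code(429, (403, 429))
assert can_accept_status_code(503, lambda s: 500 <= s < 600)
```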
def _download_xml_handle(self, url_or_request, video_id, note='Downloading XML', errnote='Unable to download XML', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return a tuple (xml as an compat_etree_Element, URL handle).\n\n See _download_webpage docstring for arguments specification.\n '
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
if (res is False):
return res
(xml_string, urlh) = res
return (self._parse_xml(xml_string, video_id, transform_source=transform_source, fatal=fatal), urlh)
| -2,285,998,260,765,022,200
|
Return a tuple (xml as a compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
|
youtube_dl/extractor/common.py
|
_download_xml_handle
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _download_xml_handle(self, url_or_request, video_id, note='Downloading XML', errnote='Unable to download XML', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return a tuple (xml as an compat_etree_Element, URL handle).\n\n See _download_webpage docstring for arguments specification.\n '
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
if (res is False):
return res
(xml_string, urlh) = res
return (self._parse_xml(xml_string, video_id, transform_source=transform_source, fatal=fatal), urlh)
|
def _download_xml(self, url_or_request, video_id, note='Downloading XML', errnote='Unable to download XML', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return the xml as an compat_etree_Element.\n\n See _download_webpage docstring for arguments specification.\n '
res = self._download_xml_handle(url_or_request, video_id, note=note, errnote=errnote, transform_source=transform_source, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
return (res if (res is False) else res[0])
| -2,794,144,738,840,244,000
|
Return the xml as a compat_etree_Element.
See _download_webpage docstring for arguments specification.
|
youtube_dl/extractor/common.py
|
_download_xml
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _download_xml(self, url_or_request, video_id, note='Downloading XML', errnote='Unable to download XML', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return the xml as an compat_etree_Element.\n\n See _download_webpage docstring for arguments specification.\n '
res = self._download_xml_handle(url_or_request, video_id, note=note, errnote=errnote, transform_source=transform_source, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
return (res if (res is False) else res[0])
|
def _download_json_handle(self, url_or_request, video_id, note='Downloading JSON metadata', errnote='Unable to download JSON metadata', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return a tuple (JSON object, URL handle).\n\n See _download_webpage docstring for arguments specification.\n '
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
if (res is False):
return res
(json_string, urlh) = res
return (self._parse_json(json_string, video_id, transform_source=transform_source, fatal=fatal), urlh)
| -7,486,056,734,759,543,000
|
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
|
youtube_dl/extractor/common.py
|
_download_json_handle
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _download_json_handle(self, url_or_request, video_id, note='Downloading JSON metadata', errnote='Unable to download JSON metadata', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return a tuple (JSON object, URL handle).\n\n See _download_webpage docstring for arguments specification.\n '
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
if (res is False):
return res
(json_string, urlh) = res
return (self._parse_json(json_string, video_id, transform_source=transform_source, fatal=fatal), urlh)
|
def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', errnote='Unable to download JSON metadata', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return the JSON object as a dict.\n\n See _download_webpage docstring for arguments specification.\n '
res = self._download_json_handle(url_or_request, video_id, note=note, errnote=errnote, transform_source=transform_source, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
return (res if (res is False) else res[0])
| 6,550,132,766,695,574,000
|
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
|
youtube_dl/extractor/common.py
|
_download_json
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', errnote='Unable to download JSON metadata', transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
'\n Return the JSON object as a dict.\n\n See _download_webpage docstring for arguments specification.\n '
res = self._download_json_handle(url_or_request, video_id, note=note, errnote=errnote, transform_source=transform_source, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query, expected_status=expected_status)
return (res if (res is False) else res[0])
|
def to_screen(self, msg):
"Print msg to screen, prefixing it with '[ie_name]'"
self._downloader.to_screen(('[%s] %s' % (self.IE_NAME, msg)))
| -8,257,251,742,180,446,000
|
Print msg to screen, prefixing it with '[ie_name]'
|
youtube_dl/extractor/common.py
|
to_screen
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def to_screen(self, msg):
self._downloader.to_screen(('[%s] %s' % (self.IE_NAME, msg)))
|
def report_extraction(self, id_or_name):
'Report information extraction.'
self.to_screen(('%s: Extracting information' % id_or_name))
| 9,209,310,315,850,801,000
|
Report information extraction.
|
youtube_dl/extractor/common.py
|
report_extraction
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def report_extraction(self, id_or_name):
self.to_screen(('%s: Extracting information' % id_or_name))
|
def report_download_webpage(self, video_id):
'Report webpage download.'
self.to_screen(('%s: Downloading webpage' % video_id))
| -7,977,462,286,677,206,000
|
Report webpage download.
|
youtube_dl/extractor/common.py
|
report_download_webpage
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def report_download_webpage(self, video_id):
self.to_screen(('%s: Downloading webpage' % video_id))
|
def report_age_confirmation(self):
'Report attempt to confirm age.'
self.to_screen('Confirming age')
| 5,554,603,744,244,092,000
|
Report attempt to confirm age.
|
youtube_dl/extractor/common.py
|
report_age_confirmation
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def report_age_confirmation(self):
self.to_screen('Confirming age')
|
def report_login(self):
'Report attempt to log in.'
self.to_screen('Logging in')
| -2,843,299,703,482,748,000
|
Report attempt to log in.
|
youtube_dl/extractor/common.py
|
report_login
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def report_login(self):
self.to_screen('Logging in')
|
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
'Returns a URL that points to a page that should be processed'
video_info = {'_type': 'url', 'url': url, 'ie_key': ie}
if (video_id is not None):
video_info['id'] = video_id
if (video_title is not None):
video_info['title'] = video_title
return video_info
| 2,635,067,718,620,197,000
|
Returns a URL that points to a page that should be processed
|
youtube_dl/extractor/common.py
|
url_result
|
DevSecOpsGuy/youtube-dl-1
|
python
|
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
video_info = {'_type': 'url', 'url': url, 'ie_key': ie}
if (video_id is not None):
video_info['id'] = video_id
if (video_title is not None):
video_info['title'] = video_title
return video_info
|
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
'Returns a playlist'
video_info = {'_type': 'playlist', 'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
| -8,882,779,261,970,664,000
|
Returns a playlist
|
youtube_dl/extractor/common.py
|
playlist_result
|
DevSecOpsGuy/youtube-dl-1
|
python
|
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
video_info = {'_type': 'playlist', 'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
|
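url_result and playlist_result above are plain dict builders; a small illustrative sketch of the shapes they produce (URLs and IDs are made up):

entry = {'_type': 'url', 'url': 'https://example.com/v/1',
         'ie_key': None, 'id': '1', 'title': 'first clip'}
playlist = {'_type': 'playlist', 'entries': [entry],
            'id': 'pl-1', 'title': 'demo playlist'}
assert playlist['entries'][0]['_type'] == 'url'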
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
'\n Perform a regex search on the given string, using a single or a list of\n patterns returning the first matching group.\n In case of failure return a default value or raise a WARNING or a\n RegexNotFoundError, depending on fatal, specifying the field name.\n '
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if ((not self._downloader.params.get('no_color')) and (compat_os_name != 'nt') and sys.stderr.isatty()):
_name = ('\x1b[0;34m%s\x1b[0m' % name)
else:
_name = name
if mobj:
if (group is None):
return next((g for g in mobj.groups() if (g is not None)))
else:
return mobj.group(group)
elif (default is not NO_DEFAULT):
return default
elif fatal:
raise RegexNotFoundError(('Unable to extract %s' % _name))
else:
self._downloader.report_warning((('unable to extract %s' % _name) + bug_reports_message()))
return None
| 9,130,158,884,569,310,000
|
Perform a regex search on the given string, using a single pattern or a list of
patterns, returning the first matching group.
On failure, return a default value or raise a warning or a
RegexNotFoundError, depending on fatal, naming the field being extracted.
|
youtube_dl/extractor/common.py
|
_search_regex
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
'\n Perform a regex search on the given string, using a single or a list of\n patterns returning the first matching group.\n In case of failure return a default value or raise a WARNING or a\n RegexNotFoundError, depending on fatal, specifying the field name.\n '
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if ((not self._downloader.params.get('no_color')) and (compat_os_name != 'nt') and sys.stderr.isatty()):
_name = ('\x1b[0;34m%s\x1b[0m' % name)
else:
_name = name
if mobj:
if (group is None):
return next((g for g in mobj.groups() if (g is not None)))
else:
return mobj.group(group)
elif (default is not NO_DEFAULT):
return default
elif fatal:
raise RegexNotFoundError(('Unable to extract %s' % _name))
else:
self._downloader.report_warning((('unable to extract %s' % _name) + bug_reports_message()))
return None
|
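A self-contained sketch of the lookup semantics _search_regex implements, with the downloader plumbing and colored output stripped out (NO_DEFAULT here is a stand-in sentinel, not the real module object):

import re

NO_DEFAULT = object()  # stand-in sentinel

def search_regex(pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
    patterns = [pattern] if isinstance(pattern, str) else pattern
    mobj = None
    for p in patterns:
        mobj = re.search(p, string, flags)
        if mobj:
            break
    if mobj:
        # first non-None group unless a specific group was requested
        if group is None:
            return next(g for g in mobj.groups() if g is not None)
        return mobj.group(group)
    if default is not NO_DEFAULT:
        return default
    if fatal:
        raise ValueError('Unable to extract %s' % name)
    return None

print(search_regex(r'id=(\d+)', 'id=42&x=1', 'video id'))               # 42
print(search_regex(r'id=(\d+)', 'no match', 'video id', default=None))  # None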
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
'\n Like _search_regex, but strips HTML tags and unescapes entities.\n '
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
| 8,618,604,773,723,527,000
|
Like _search_regex, but strips HTML tags and unescapes entities.
|
youtube_dl/extractor/common.py
|
_html_search_regex
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
|
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"\n Get the login info as (username, password)\n First look for the manually specified credentials using username_option\n and password_option as keys in params dictionary. If no such credentials\n available look in the netrc file using the netrc_machine or _NETRC_MACHINE\n value.\n If there's no info available, return (None, None)\n "
if (self._downloader is None):
return (None, None)
downloader_params = self._downloader.params
if (downloader_params.get(username_option) is not None):
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
(username, password) = self._get_netrc_login_info(netrc_machine)
return (username, password)
| -4,727,685,870,909,069,000
|
Get the login info as (username, password).
First look for manually specified credentials, using username_option
and password_option as keys in the params dictionary. If no such credentials
are available, look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None).
|
youtube_dl/extractor/common.py
|
_get_login_info
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"\n Get the login info as (username, password)\n First look for the manually specified credentials using username_option\n and password_option as keys in params dictionary. If no such credentials\n available look in the netrc file using the netrc_machine or _NETRC_MACHINE\n value.\n If there's no info available, return (None, None)\n "
if (self._downloader is None):
return (None, None)
downloader_params = self._downloader.params
if (downloader_params.get(username_option) is not None):
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
(username, password) = self._get_netrc_login_info(netrc_machine)
return (username, password)
|
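A hedged sketch of the credential-resolution order described above, with a plain dict standing in for the downloader params and the netrc fallback omitted (names and values are illustrative):

def get_login_info(params, username_option='username', password_option='password'):
    if params.get(username_option) is not None:
        return params[username_option], params[password_option]
    return None, None  # the real method falls back to netrc here

print(get_login_info({'username': 'alice', 'password': 's3cret'}))  # ('alice', 's3cret')
print(get_login_info({}))                                           # (None, None)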
def _get_tfa_info(self, note='two-factor verification code'):
"\n Get the two-factor authentication info\n TODO - asking the user will be required for sms/phone verify\n currently just uses the command line option\n If there's no info available, return None\n "
if (self._downloader is None):
return None
downloader_params = self._downloader.params
if (downloader_params.get('twofactor') is not None):
return downloader_params['twofactor']
return compat_getpass(('Type %s and press [Return]: ' % note))
| -1,595,709,114,444,867,000
|
Get the two-factor authentication info.
TODO: asking the user will be required for SMS/phone verification;
currently this just uses the command-line option.
If there's no info available, return None.
|
youtube_dl/extractor/common.py
|
_get_tfa_info
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _get_tfa_info(self, note='two-factor verification code'):
"\n Get the two-factor authentication info\n TODO - asking the user will be required for sms/phone verify\n currently just uses the command line option\n If there's no info available, return None\n "
if (self._downloader is None):
return None
downloader_params = self._downloader.params
if (downloader_params.get('twofactor') is not None):
return downloader_params['twofactor']
return compat_getpass(('Type %s and press [Return]: ' % note))
|
def http_scheme(self):
' Either "http:" or "https:", depending on the user\'s preferences '
return ('http:' if self._downloader.params.get('prefer_insecure', False) else 'https:')
| -2,735,384,092,449,529,300
|
Either "http:" or "https:", depending on the user's preferences
|
youtube_dl/extractor/common.py
|
http_scheme
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def http_scheme(self):
' Either "http:" or "https:", depending on the user\'s preferences '
return ('http:' if self._downloader.params.get('prefer_insecure', False) else 'https:')
|
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
'\n Parse formats from MPD manifest.\n References:\n 1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),\n http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip\n 2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP\n '
if (mpd_doc.get('type') == 'dynamic'):
return []
namespace = self._search_regex('(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return (element.find(_add_ns('ContentProtection')) is not None)
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if (segment_timeline is not None):
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += (1 + r)
ms_info['s'].append({'t': int(s.get('t', 0)), 'd': int(s.attrib['d']), 'r': r})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if (initialization is not None):
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if (segment_list is not None):
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if (segment_template is not None):
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = (parse_duration(period.get('duration')) or mpd_duration)
period_ms_info = extract_multisegment_info(period, {'start_number': 1, 'timescale': 1})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if (content_type == 'text'):
pass
elif (content_type in ('video', 'audio')):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if (base_url_e is not None):
base_url = (base_url_e.text + base_url)
if re.match('^https?://', base_url):
break
if (mpd_base_url and (not re.match('^https?://', base_url))):
if ((not mpd_base_url.endswith('/')) and (not base_url.startswith('/'))):
mpd_base_url += '/'
base_url = (mpd_base_url + base_url)
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none((url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if (url_el is not None) else None))
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {'format_id': (('%s-%s' % (mpd_id, representation_id)) if mpd_id else representation_id), 'manifest_url': mpd_url, 'ext': mimetype2ext(mime_type), 'width': int_or_none(representation_attrib.get('width')), 'height': int_or_none(representation_attrib.get('height')), 'tbr': float_or_none(bandwidth, 1000), 'asr': int_or_none(representation_attrib.get('audioSamplingRate')), 'fps': int_or_none(representation_attrib.get('frameRate')), 'language': (lang if (lang not in ('mul', 'und', 'zxx', 'mis')) else None), 'format_note': ('DASH %s' % content_type), 'filesize': filesize, 'container': (mimetype2ext(mime_type) + '_dash')}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
t = ''
in_template = False
for c in tmpl:
t += c
if (c == '$'):
in_template = (not in_template)
elif ((c == '%') and (not in_template)):
t += c
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(('\\$(%s)\\$' % '|'.join(identifiers)), '%(\\1)d', t)
t = re.sub(('\\$(%s)%%([^$]+)\\$' % '|'.join(identifiers)), '%(\\1)\\2', t)
t = t.replace('$$', '$')  # assign the result; the original discarded it, leaving '$$' unescaped
return t
if ('initialization' in representation_ms_info):
initialization_template = prepare_template('initialization', ('Bandwidth',))
representation_ms_info['initialization_url'] = (initialization_template % {'Bandwidth': bandwidth})
def location_key(location):
return ('url' if re.match('^https?://', location) else 'path')
if (('segment_urls' not in representation_ms_info) and ('media' in representation_ms_info)):
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
if (('%(Number' in media_template) and ('s' not in representation_ms_info)):
segment_duration = None
if (('total_number' not in representation_ms_info) and ('segment_duration' in representation_ms_info)):
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil((float(period_duration) / segment_duration)))
representation_ms_info['fragments'] = [{media_location_key: (media_template % {'Number': segment_number, 'Bandwidth': bandwidth}), 'duration': segment_duration} for segment_number in range(representation_ms_info['start_number'], (representation_ms_info['total_number'] + representation_ms_info['start_number']))]
else:
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = (media_template % {'Time': segment_time, 'Bandwidth': bandwidth, 'Number': segment_number})
representation_ms_info['fragments'].append({media_location_key: segment_url, 'duration': float_or_none(segment_d, representation_ms_info['timescale'])})
for (num, s) in enumerate(representation_ms_info['s']):
segment_time = (s.get('t') or segment_time)
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif (('segment_urls' in representation_ms_info) and ('s' in representation_ms_info)):
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range((s.get('r', 0) + 1)):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({location_key(segment_uri): segment_uri, 'duration': duration})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif ('segment_urls' in representation_ms_info):
fragments = []
segment_duration = (float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale']) if ('segment_duration' in representation_ms_info) else None)
for segment_url in representation_ms_info['segment_urls']:
fragment = {location_key(segment_url): segment_url}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
if ('fragments' in representation_ms_info):
f.update({'url': (mpd_url or base_url), 'fragment_base_url': base_url, 'fragments': [], 'protocol': 'http_dash_segments'})
if ('initialization_url' in representation_ms_info):
initialization_url = representation_ms_info['initialization_url']
if (not f.get('url')):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
else:
f['url'] = base_url
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
self.report_warning(('Unknown MIME type %s in DASH manifest' % mime_type))
return formats
| 7,961,288,481,499,288,000
|
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
|
youtube_dl/extractor/common.py
|
_parse_mpd_formats
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
'\n Parse formats from MPD manifest.\n References:\n 1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),\n http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip\n 2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP\n '
if (mpd_doc.get('type') == 'dynamic'):
return []
namespace = self._search_regex('(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return (element.find(_add_ns('ContentProtection')) is not None)
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if (segment_timeline is not None):
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += (1 + r)
ms_info['s'].append({'t': int(s.get('t', 0)), 'd': int(s.attrib['d']), 'r': r})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if (initialization is not None):
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if (segment_list is not None):
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if (segment_template is not None):
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = (parse_duration(period.get('duration')) or mpd_duration)
period_ms_info = extract_multisegment_info(period, {'start_number': 1, 'timescale': 1})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if (content_type == 'text'):
pass
elif (content_type in ('video', 'audio')):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if (base_url_e is not None):
base_url = (base_url_e.text + base_url)
if re.match('^https?://', base_url):
break
if (mpd_base_url and (not re.match('^https?://', base_url))):
if ((not mpd_base_url.endswith('/')) and (not base_url.startswith('/'))):
mpd_base_url += '/'
base_url = (mpd_base_url + base_url)
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none((url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if (url_el is not None) else None))
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {'format_id': (('%s-%s' % (mpd_id, representation_id)) if mpd_id else representation_id), 'manifest_url': mpd_url, 'ext': mimetype2ext(mime_type), 'width': int_or_none(representation_attrib.get('width')), 'height': int_or_none(representation_attrib.get('height')), 'tbr': float_or_none(bandwidth, 1000), 'asr': int_or_none(representation_attrib.get('audioSamplingRate')), 'fps': int_or_none(representation_attrib.get('frameRate')), 'language': (lang if (lang not in ('mul', 'und', 'zxx', 'mis')) else None), 'format_note': ('DASH %s' % content_type), 'filesize': filesize, 'container': (mimetype2ext(mime_type) + '_dash')}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
t = ''
in_template = False
for c in tmpl:
t += c
if (c == '$'):
in_template = (not in_template)
elif ((c == '%') and (not in_template)):
t += c
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(('\\$(%s)\\$' % '|'.join(identifiers)), '%(\\1)d', t)
t = re.sub(('\\$(%s)%%([^$]+)\\$' % '|'.join(identifiers)), '%(\\1)\\2', t)
t = t.replace('$$', '$')  # assign the result; the original discarded it, leaving '$$' unescaped
return t
if ('initialization' in representation_ms_info):
initialization_template = prepare_template('initialization', ('Bandwidth',))
representation_ms_info['initialization_url'] = (initialization_template % {'Bandwidth': bandwidth})
def location_key(location):
return ('url' if re.match('^https?://', location) else 'path')
if (('segment_urls' not in representation_ms_info) and ('media' in representation_ms_info)):
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
if (('%(Number' in media_template) and ('s' not in representation_ms_info)):
segment_duration = None
if (('total_number' not in representation_ms_info) and ('segment_duration' in representation_ms_info)):
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil((float(period_duration) / segment_duration)))
representation_ms_info['fragments'] = [{media_location_key: (media_template % {'Number': segment_number, 'Bandwidth': bandwidth}), 'duration': segment_duration} for segment_number in range(representation_ms_info['start_number'], (representation_ms_info['total_number'] + representation_ms_info['start_number']))]
else:
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = (media_template % {'Time': segment_time, 'Bandwidth': bandwidth, 'Number': segment_number})
representation_ms_info['fragments'].append({media_location_key: segment_url, 'duration': float_or_none(segment_d, representation_ms_info['timescale'])})
for (num, s) in enumerate(representation_ms_info['s']):
segment_time = (s.get('t') or segment_time)
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif (('segment_urls' in representation_ms_info) and ('s' in representation_ms_info)):
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range((s.get('r', 0) + 1)):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({location_key(segment_uri): segment_uri, 'duration': duration})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif ('segment_urls' in representation_ms_info):
fragments = []
segment_duration = (float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale']) if ('segment_duration' in representation_ms_info) else None)
for segment_url in representation_ms_info['segment_urls']:
fragment = {location_key(segment_url): segment_url}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
if ('fragments' in representation_ms_info):
f.update({'url': (mpd_url or base_url), 'fragment_base_url': base_url, 'fragments': [], 'protocol': 'http_dash_segments'})
if ('initialization_url' in representation_ms_info):
initialization_url = representation_ms_info['initialization_url']
if (not f.get('url')):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
else:
f['url'] = base_url
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
self.report_warning(('Unknown MIME type %s in DASH manifest' % mime_type))
return formats
|
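The subtlest step above is prepare_template, which rewrites DASH $Identifier$ placeholders into %-style format fields. A standalone sketch of just those two substitutions (the template string is invented):

import re

identifiers = ('Number', 'Bandwidth', 'Time')
tmpl = 'seg-$Number%05d$-$Bandwidth$.m4s'
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', tmpl)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
print(t)                                        # seg-%(Number)05d-%(Bandwidth)d.m4s
print(t % {'Number': 7, 'Bandwidth': 1200000})  # seg-00007-1200000.m4s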
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
'\n Parse formats from ISM manifest.\n References:\n 1. [MS-SSTR]: Smooth Streaming Protocol,\n https://msdn.microsoft.com/en-us/library/ff469518.aspx\n '
if ((ism_doc.get('IsLive') == 'TRUE') or (ism_doc.find('Protection') is not None)):
return []
duration = int(ism_doc.attrib['Duration'])
timescale = (int_or_none(ism_doc.get('TimeScale')) or 10000000)
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if (stream_type not in ('video', 'audio')):
continue
url_pattern = stream.attrib['Url']
stream_timescale = (int_or_none(stream.get('TimeScale')) or timescale)
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC', ('AACL' if (track.get('AudioTag') == '255') else None))
if (fourcc not in ('H264', 'AVC1', 'AACL')):
self.report_warning(('%s is not a supported codec' % fourcc))
continue
tbr = (int(track.attrib['Bitrate']) // 1000)
width = int_or_none((track.get('MaxWidth') or track.get('Width')))
height = int_or_none((track.get('MaxHeight') or track.get('Height')))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub('{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {'time': 0}
stream_fragments = stream.findall('c')
for (stream_fragment_index, stream_fragment) in enumerate(stream_fragments):
fragment_ctx['time'] = (int_or_none(stream_fragment.get('t')) or fragment_ctx['time'])
fragment_repeat = (int_or_none(stream_fragment.get('r')) or 1)
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if (not fragment_ctx['duration']):
try:
next_fragment_time = int(stream_fragment[(stream_fragment_index + 1)].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = ((next_fragment_time - fragment_ctx['time']) / fragment_repeat)
for _ in range(fragment_repeat):
fragments.append({'url': re.sub('{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern), 'duration': (fragment_ctx['duration'] / stream_timescale)})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({'format_id': '-'.join(format_id), 'url': ism_url, 'manifest_url': ism_url, 'ext': ('ismv' if (stream_type == 'video') else 'isma'), 'width': width, 'height': height, 'tbr': tbr, 'asr': sampling_rate, 'vcodec': ('none' if (stream_type == 'audio') else fourcc), 'acodec': ('none' if (stream_type == 'video') else fourcc), 'protocol': 'ism', 'fragments': fragments, '_download_params': {'duration': duration, 'timescale': stream_timescale, 'width': (width or 0), 'height': (height or 0), 'fourcc': fourcc, 'codec_private_data': track.get('CodecPrivateData'), 'sampling_rate': sampling_rate, 'channels': int_or_none(track.get('Channels', 2)), 'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)), 'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4))}})
return formats
| -2,052,320,450,133,081,300
|
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
|
youtube_dl/extractor/common.py
|
_parse_ism_formats
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
'\n Parse formats from ISM manifest.\n References:\n 1. [MS-SSTR]: Smooth Streaming Protocol,\n https://msdn.microsoft.com/en-us/library/ff469518.aspx\n '
if ((ism_doc.get('IsLive') == 'TRUE') or (ism_doc.find('Protection') is not None)):
return []
duration = int(ism_doc.attrib['Duration'])
timescale = (int_or_none(ism_doc.get('TimeScale')) or 10000000)
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if (stream_type not in ('video', 'audio')):
continue
url_pattern = stream.attrib['Url']
stream_timescale = (int_or_none(stream.get('TimeScale')) or timescale)
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC', ('AACL' if (track.get('AudioTag') == '255') else None))
if (fourcc not in ('H264', 'AVC1', 'AACL')):
self.report_warning(('%s is not a supported codec' % fourcc))
continue
tbr = (int(track.attrib['Bitrate']) // 1000)
width = int_or_none((track.get('MaxWidth') or track.get('Width')))
height = int_or_none((track.get('MaxHeight') or track.get('Height')))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub('{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {'time': 0}
stream_fragments = stream.findall('c')
for (stream_fragment_index, stream_fragment) in enumerate(stream_fragments):
fragment_ctx['time'] = (int_or_none(stream_fragment.get('t')) or fragment_ctx['time'])
fragment_repeat = (int_or_none(stream_fragment.get('r')) or 1)
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if (not fragment_ctx['duration']):
try:
next_fragment_time = int(stream_fragment[(stream_fragment_index + 1)].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = ((next_fragment_time - fragment_ctx['time']) / fragment_repeat)
for _ in range(fragment_repeat):
fragments.append({'url': re.sub('{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern), 'duration': (fragment_ctx['duration'] / stream_timescale)})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({'format_id': '-'.join(format_id), 'url': ism_url, 'manifest_url': ism_url, 'ext': ('ismv' if (stream_type == 'video') else 'isma'), 'width': width, 'height': height, 'tbr': tbr, 'asr': sampling_rate, 'vcodec': ('none' if (stream_type == 'audio') else fourcc), 'acodec': ('none' if (stream_type == 'video') else fourcc), 'protocol': 'ism', 'fragments': fragments, '_download_params': {'duration': duration, 'timescale': stream_timescale, 'width': (width or 0), 'height': (height or 0), 'fourcc': fourcc, 'codec_private_data': track.get('CodecPrivateData'), 'sampling_rate': sampling_rate, 'channels': int_or_none(track.get('Channels', 2)), 'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)), 'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4))}})
return formats
|
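A toy expansion of the ISM 'c' (fragment) elements handled above, where t is an absolute start time, d a duration and r a repeat count (the element values are made up):

fragments = []
ctx = {'time': 0}
for c in ({'t': 0, 'd': 20000000, 'r': 2}, {'d': 15000000}):
    ctx['time'] = c.get('t', ctx['time'])
    for _ in range(c.get('r') or 1):
        fragments.append({'time': ctx['time'], 'duration': c['d']})
        ctx['time'] += c['d']
print(fragments)  # three fragments: two repeats of the first element, one of the second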
def _live_title(self, name):
' Generate the title for a live video '
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return ((name + ' ') + now_str)
| 1,526,277,538,303,499,000
|
Generate the title for a live video
|
youtube_dl/extractor/common.py
|
_live_title
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _live_title(self, name):
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return ((name + ' ') + now_str)
|
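The same timestamp-suffix idea outside the class (the stream name is illustrative):

import datetime

print('Some live stream ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M'))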
def _get_cookies(self, url):
' Return a compat_cookies.SimpleCookie with the cookies for the url '
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
| 192,552,671,788,474,620
|
Return a compat_cookies.SimpleCookie with the cookies for the url
|
youtube_dl/extractor/common.py
|
_get_cookies
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _get_cookies(self, url):
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
|
def _apply_first_set_cookie_header(self, url_handle, cookie):
'\n Apply first Set-Cookie header instead of the last. Experimental.\n\n Some sites (e.g. [1-3]) may serve two cookies under the same name\n in Set-Cookie header and expect the first (old) one to be set rather\n than second (new). However, as of RFC6265 the newer one cookie\n should be set into cookie store what actually happens.\n We will workaround this issue by resetting the cookie to\n the first one manually.\n 1. https://new.vk.com/\n 2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201\n 3. https://learning.oreilly.com/\n '
for (header, cookies) in url_handle.headers.items():
if (header.lower() != 'set-cookie'):
continue
if (sys.version_info[0] >= 3):
cookies = cookies.encode('iso-8859-1')
cookies = cookies.decode('utf-8')
cookie_value = re.search(('%s=(.+?);.*?\\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie), cookies)
if cookie_value:
(value, domain) = cookie_value.groups()
self._set_cookie(domain, cookie, value)
break
| -3,143,821,134,783,491,000
|
Apply first Set-Cookie header instead of the last. Experimental.
Some sites (e.g. [1-3]) may serve two cookies under the same name
in Set-Cookie header and expect the first (old) one to be set rather
than second (new). However, as of RFC6265 the newer one cookie
should be set into cookie store what actually happens.
We will workaround this issue by resetting the cookie to
the first one manually.
1. https://new.vk.com/
2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3. https://learning.oreilly.com/
|
youtube_dl/extractor/common.py
|
_apply_first_set_cookie_header
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _apply_first_set_cookie_header(self, url_handle, cookie):
'\n Apply first Set-Cookie header instead of the last. Experimental.\n\n Some sites (e.g. [1-3]) may serve two cookies under the same name\n in Set-Cookie header and expect the first (old) one to be set rather\n than second (new). However, as of RFC6265 the newer one cookie\n should be set into cookie store what actually happens.\n We will workaround this issue by resetting the cookie to\n the first one manually.\n 1. https://new.vk.com/\n 2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201\n 3. https://learning.oreilly.com/\n '
for (header, cookies) in url_handle.headers.items():
if (header.lower() != 'set-cookie'):
continue
if (sys.version_info[0] >= 3):
cookies = cookies.encode('iso-8859-1')
cookies = cookies.decode('utf-8')
cookie_value = re.search(('%s=(.+?);.*?\\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie), cookies)
if cookie_value:
(value, domain) = cookie_value.groups()
self._set_cookie(domain, cookie, value)
break
|
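An illustrative run of the duplicated-cookie regex above (header values are made up); it captures the first occurrence of the cookie and its domain:

import re

cookies = 'remixlang=0; Domain=.vk.com, remixlang=777; Domain=.vk.com'
m = re.search(r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % 'remixlang', cookies)
print(m.groups())  # ('0', '.vk.com')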
def is_suitable(self, age_limit):
' Test whether the extractor is generally suitable for the given\n age limit (i.e. pornographic sites are not, all others usually are) '
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(tc.get('info_dict', {}).get('age_limit'), age_limit)
if (not is_restricted):
return True
any_restricted = (any_restricted or is_restricted)
return (not any_restricted)
| -8,900,054,884,063,124,000
|
Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are)
|
youtube_dl/extractor/common.py
|
is_suitable
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def is_suitable(self, age_limit):
' Test whether the extractor is generally suitable for the given\n age limit (i.e. pornographic sites are not, all others usually are) '
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(tc.get('info_dict', {}).get('age_limit'), age_limit)
if (not is_restricted):
return True
any_restricted = (any_restricted or is_restricted)
return (not any_restricted)
|
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
' Merge subtitle items for one language. Items with duplicated URLs\n will be dropped. '
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if (item['url'] not in list1_urls)])
return ret
| -8,306,789,552,558,350,000
|
Merge subtitle items for one language. Items with duplicated URLs
will be dropped.
|
youtube_dl/extractor/common.py
|
_merge_subtitle_items
|
DevSecOpsGuy/youtube-dl-1
|
python
|
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
' Merge subtitle items for one language. Items with duplicated URLs\n will be dropped. '
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if (item['url'] not in list1_urls)])
return ret
|
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
' Merge two subtitle dictionaries, language by language. '
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
| -8,135,354,963,678,094,000
|
Merge two subtitle dictionaries, language by language.
|
youtube_dl/extractor/common.py
|
_merge_subtitles
|
DevSecOpsGuy/youtube-dl-1
|
python
|
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
|
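A small self-contained rerun of the merge semantics above (URLs are invented): duplicates by URL are dropped, and languages are merged independently.

def merge_items(list1, list2):
    # mirrors _merge_subtitle_items: keep list1, append unseen URLs from list2
    seen = set(item['url'] for item in list1)
    return list(list1) + [item for item in list2 if item['url'] not in seen]

a = {'en': [{'url': 'https://example.com/en.vtt'}]}
b = {'en': [{'url': 'https://example.com/en.vtt'}, {'url': 'https://example.com/en.srt'}],
     'de': [{'url': 'https://example.com/de.vtt'}]}
merged = dict(a)
for lang in b:
    merged[lang] = merge_items(a.get(lang, []), b[lang])
print(sorted(merged))     # ['de', 'en']
print(len(merged['en']))  # 2 -- the duplicate URL was dropped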
def _get_n_results(self, query, n):
'Get a specified number of results for a query'
raise NotImplementedError('This method must be implemented by subclasses')
| -6,232,748,535,575,834,000
|
Get a specified number of results for a query
|
youtube_dl/extractor/common.py
|
_get_n_results
|
DevSecOpsGuy/youtube-dl-1
|
python
|
def _get_n_results(self, query, n):
raise NotImplementedError('This method must be implemented by subclasses')
|
def __init__(self, client, **kwargs):
'\n Creates a new TransferDeviceClientCompositeOperations object\n\n :param TransferDeviceClient client:\n The service client which will be wrapped by this object\n '
self.client = client
| 3,272,872,751,382,135,300
|
Creates a new TransferDeviceClientCompositeOperations object
:param TransferDeviceClient client:
The service client which will be wrapped by this object
|
src/oci/dts/transfer_device_client_composite_operations.py
|
__init__
|
CentroidChef/oci-python-sdk
|
python
|
def __init__(self, client, **kwargs):
'\n Creates a new TransferDeviceClientCompositeOperations object\n\n :param TransferDeviceClient client:\n The service client which will be wrapped by this object\n '
self.client = client
|
def update_transfer_device_and_wait_for_state(self, id, transfer_device_label, update_transfer_device_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
'\n Calls :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device` and waits for the :py:class:`~oci.dts.models.TransferDevice` acted upon\n to enter the given state(s).\n\n :param str id: (required)\n ID of the Transfer Job\n\n :param str transfer_device_label: (required)\n Label of the Transfer Device\n\n :param oci.dts.models.UpdateTransferDeviceDetails update_transfer_device_details: (required)\n fields to update\n\n :param list[str] wait_for_states:\n An array of states to wait on. These should be valid values for :py:attr:`~oci.dts.models.TransferDevice.lifecycle_state`\n\n :param dict operation_kwargs:\n A dictionary of keyword arguments to pass to :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device`\n\n :param dict waiter_kwargs:\n A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``\n as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait\n '
operation_result = self.client.update_transfer_device(id, transfer_device_label, update_transfer_device_details, **operation_kwargs)
if (not wait_for_states):
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(self.client, self.client.get_transfer_device(wait_for_resource_id), evaluate_response=(lambda r: (getattr(r.data, 'lifecycle_state') and (getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states))), **waiter_kwargs)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
| -2,558,973,560,206,875,000
|
Calls :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device` and waits for the :py:class:`~oci.dts.models.TransferDevice` acted upon
to enter the given state(s).
:param str id: (required)
ID of the Transfer Job
:param str transfer_device_label: (required)
Label of the Transfer Device
:param oci.dts.models.UpdateTransferDeviceDetails update_transfer_device_details: (required)
fields to update
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.dts.models.TransferDevice.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
|
src/oci/dts/transfer_device_client_composite_operations.py
|
update_transfer_device_and_wait_for_state
|
CentroidChef/oci-python-sdk
|
python
|
def update_transfer_device_and_wait_for_state(self, id, transfer_device_label, update_transfer_device_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
'\n Calls :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device` and waits for the :py:class:`~oci.dts.models.TransferDevice` acted upon\n to enter the given state(s).\n\n :param str id: (required)\n ID of the Transfer Job\n\n :param str transfer_device_label: (required)\n Label of the Transfer Device\n\n :param oci.dts.models.UpdateTransferDeviceDetails update_transfer_device_details: (required)\n fields to update\n\n :param list[str] wait_for_states:\n An array of states to wait on. These should be valid values for :py:attr:`~oci.dts.models.TransferDevice.lifecycle_state`\n\n :param dict operation_kwargs:\n A dictionary of keyword arguments to pass to :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device`\n\n :param dict waiter_kwargs:\n A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``\n as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait\n '
operation_result = self.client.update_transfer_device(id, transfer_device_label, update_transfer_device_details, **operation_kwargs)
if (not wait_for_states):
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(self.client, self.client.get_transfer_device(wait_for_resource_id), evaluate_response=(lambda r: (getattr(r.data, 'lifecycle_state') and (getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states))), **waiter_kwargs)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
|
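A hypothetical call shape for the composite operation above (OCIDs, labels and states are placeholders), followed by a runnable miniature of its stopping rule:

# composite_ops.update_transfer_device_and_wait_for_state(
#     id='ocid1.transferjob.oc1..example', transfer_device_label='device-1',
#     update_transfer_device_details=details, wait_for_states=['PROCESSING'],
#     waiter_kwargs={'max_wait_seconds': 600, 'max_interval_seconds': 30})

lowered_wait_for_states = [w.lower() for w in ['PROCESSING', 'COMPLETE']]

def reached(lifecycle_state):
    return bool(lifecycle_state) and lifecycle_state.lower() in lowered_wait_for_states

print(reached('Processing'))  # True
print(reached('CANCELLED'))   # False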
def do_plots_c(Ud, Unew):
' plot Ud,new and Ud with zoom on the bug '
pylab.clf()
pylab.cla()
f = pylab.figure()
f.text(0.5, 0.95, '$U_{\\rm d}$ (left) and $U_{\\rm d, new}$ (right) ', horizontalalignment='center')
pylab.subplot(221)
pylab.imshow(Ud[0])
pylab.ylabel('# of cells', size=8)
pylab.subplot(223)
pylab.imshow(Ud[1])
pylab.xlim(1, 32)
pylab.xlabel('# of cells', size=8)
pylab.ylabel('# of cells', size=8)
pylab.subplot(222)
pylab.imshow(Unew[0])
pylab.ylabel('# of cells', size=8)
pylab.subplot(224)
pylab.imshow(Unew[1])
pylab.xlim(1, 32)
pylab.xlabel('# of cells', size=8)
pylab.ylabel('# of cells', size=8)
pylab.savefig('plots/item_c_Udnew.png')
| -4,154,372,507,956,683,300
|
plot Ud,new and Ud with zoom on the bug
|
homework5_elliptic_PDES/part_c.py
|
do_plots_c
|
aquario-crypto/Numerical_Methods_for_Physics
|
python
|
def do_plots_c(Ud, Unew):
pylab.clf()
pylab.cla()
f = pylab.figure()
f.text(0.5, 0.95, '$U_{\\rm d}$ (left) and $U_{\\rm d, new}$ (right) ', horizontalalignment='center')
pylab.subplot(221)
pylab.imshow(Ud[0])
pylab.ylabel('# of cells', size=8)
pylab.subplot(223)
pylab.imshow(Ud[1])
pylab.xlim(1, 32)
pylab.xlabel('# of cells', size=8)
pylab.ylabel('# of cells', size=8)
pylab.subplot(222)
pylab.imshow(Unew[0])
pylab.ylabel('# of cells', size=8)
pylab.subplot(224)
pylab.imshow(Unew[1])
pylab.xlim(1, 32)
pylab.xlabel('# of cells', size=8)
pylab.ylabel('# of cells', size=8)
pylab.savefig('plots/item_c_Udnew.png')
|
def doPartC(Ustar, phi_num, Ud, nx, ny, xmin, xmax, ymin, ymax, DO_PLOTS):
' coordinates of centers '
dx = ((xmax - xmin) / nx)
dy = ((ymax - ymin) / ny)
' calculates the new gradient'
Gphi = numpy.gradient(phi_num, dx, dy)
' recover Ud, new '
Unew = map(operator.sub, Ustar, Gphi)
if (DO_PLOTS == 1):
do_plots_c(Ud, Unew)
return 0
| 1,935,657,053,746,374,100
|
coordinates of centers
|
homework5_elliptic_PDES/part_c.py
|
doPartC
|
aquario-crypto/Numerical_Methods_for_Physics
|
python
|
def doPartC(Ustar, phi_num, Ud, nx, ny, xmin, xmax, ymin, ymax, DO_PLOTS):
dx = ((xmax - xmin) / nx)
dy = ((ymax - ymin) / ny)
' calculates the new gradient'
Gphi = numpy.gradient(phi_num, dx, dy)
' recover Ud, new '
Unew = map(operator.sub, Ustar, Gphi)
if (DO_PLOTS == 1):
do_plots_c(Ud, Unew)
return 0
|
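A toy-sized run of the projection step doPartC performs, U_new = U* - grad(phi) (grid values are made up); note the list() around map, which matters on Python 3 because do_plots_c indexes the result:

import operator

import numpy

phi = numpy.arange(16.0).reshape(4, 4)
Ustar = [numpy.ones((4, 4)), numpy.ones((4, 4))]
Gphi = numpy.gradient(phi, 1.0, 1.0)
Unew = list(map(operator.sub, Ustar, Gphi))
print(Unew[0].shape, Unew[1].shape)  # (4, 4) (4, 4)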
@ingredient.config
def cfg():
'Model configuration.'
name = ''
parameters = {}
| 8,649,613,754,139,806,000
|
Model configuration.
|
exp/ingredients/model.py
|
cfg
|
BorgwardtLab/topo-ae-distances
|
python
|
@ingredient.config
def cfg():
name = ''
parameters = {}
|
@ingredient.named_config
def TopologicalSurrogateAutoencoder():
'TopologicalSurrogateAutoencoder.'
name = 'TopologicalSurrogateAutoencoder'
parameters = {'d_latent': ((8 * 2) * 2), 'batch_size': 32, 'arch': [256, 256, 256, 256]}
| 255,811,074,982,332,700
|
TopologicalSurrogateAutoencoder.
|
exp/ingredients/model.py
|
TopologicalSurrogateAutoencoder
|
BorgwardtLab/topo-ae-distances
|
python
|
@ingredient.named_config
def TopologicalSurrogateAutoencoder():
name = 'TopologicalSurrogateAutoencoder'
parameters = {'d_latent': ((8 * 2) * 2), 'batch_size': 32, 'arch': [256, 256, 256, 256]}
|
@ingredient.capture
def get_instance(name, parameters, _log, _seed):
'Get an instance of a model according to parameters in the configuration.\n\n Also, check if the provided parameters fit to the signature of the model\n class and log default values if not defined via the configuration.\n\n '
model_cls = getattr(models, name)
signature = inspect.signature(model_cls)
available_parameters = signature.parameters
for key in parameters.keys():
if (key not in available_parameters.keys()):
raise ValueError(f"{key} is not available in {name}'s Constructor")
optional_parameters = list(available_parameters.keys())[4:]
for parameter_name in optional_parameters:
parameter_keys = list(parameters.keys())
if (parameter_name not in parameter_keys):
if (parameter_name != 'random_state'):
default = available_parameters[parameter_name].default
_log.warning(f'Optional parameter {parameter_name} not explicitly defined, will run with {parameter_name}={default}')
else:
_log.info(f'Passing seed of experiment to model parameter `random_state`.')
parameters['random_state'] = _seed
return model_cls(**parameters)
| -27,743,696,695,635,120
|
Get an instance of a model according to parameters in the configuration.
Also, check if the provided parameters fit to the signature of the model
class and log default values if not defined via the configuration.
|
exp/ingredients/model.py
|
get_instance
|
BorgwardtLab/topo-ae-distances
|
python
|
@ingredient.capture
def get_instance(name, parameters, _log, _seed):
'Get an instance of a model according to parameters in the configuration.\n\n Also, check if the provided parameters fit to the signature of the model\n class and log default values if not defined via the configuration.\n\n '
model_cls = getattr(models, name)
signature = inspect.signature(model_cls)
available_parameters = signature.parameters
for key in parameters.keys():
if (key not in available_parameters.keys()):
raise ValueError(f"{key} is not available in {name}'s Constructor")
optional_parameters = list(available_parameters.keys())[4:]
for parameter_name in optional_parameters:
parameter_keys = list(parameters.keys())
if (parameter_name not in parameter_keys):
if (parameter_name != 'random_state'):
default = available_parameters[parameter_name].default
_log.warning(f'Optional parameter {parameter_name} not explicitly defined, will run with {parameter_name}={default}')
else:
_log.info(f'Passing seed of experiment to model parameter `random_state`.')
parameters['random_state'] = _seed
return model_cls(**parameters)
|
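A standalone sketch of the constructor introspection get_instance relies on, run against a toy class (the class and its parameters are invented):

import inspect

class ToyModel:
    def __init__(self, a, b, c=1, random_state=None):
        self.args = (a, b, c, random_state)

available = inspect.signature(ToyModel).parameters
for key in {'a', 'b', 'zzz'} - set(available):
    print("%s is not available in ToyModel's constructor" % key)  # zzz
print(list(available))  # ['a', 'b', 'c', 'random_state']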
def get_conn(self):
'\n Retrieves connection to Cloud Translate\n\n :return: Google Cloud Translate client object.\n :rtype: Client\n '
if (not self._client):
self._client = Client(credentials=self._get_credentials())
return self._client
| 8,639,950,463,497,811,000
|
Retrieves connection to Cloud Translate
:return: Google Cloud Translate client object.
:rtype: Client
|
airflow/contrib/hooks/gcp_translate_hook.py
|
get_conn
|
CatarinaSilva/airflow
|
python
|
def get_conn(self):
'\n Retrieves connection to Cloud Translate\n\n :return: Google Cloud Translate client object.\n :rtype: Client\n '
if (not self._client):
self._client = Client(credentials=self._get_credentials())
return self._client
|
def translate(self, values, target_language, format_=None, source_language=None, model=None):
"Translate a string or list of strings.\n\n See https://cloud.google.com/translate/docs/translating-text\n\n :type values: str or list\n :param values: String or list of strings to translate.\n :type target_language: str\n :param target_language: The language to translate results into. This\n is required by the API and defaults to\n the target language of the current instance.\n :type format_: str\n :param format_: (Optional) One of ``text`` or ``html``, to specify\n if the input text is plain text or HTML.\n :type source_language: str or None\n :param source_language: (Optional) The language of the text to\n be translated.\n :type model: str or None\n :param model: (Optional) The model used to translate the text, such\n as ``'base'`` or ``'nmt'``.\n :rtype: str or list\n :returns: A list of dictionaries for each queried value. Each\n dictionary typically contains three keys (though not\n all will be present in all cases)\n\n * ``detectedSourceLanguage``: The detected language (as an\n ISO 639-1 language code) of the text.\n\n * ``translatedText``: The translation of the text into the\n target language.\n\n * ``input``: The corresponding input value.\n\n * ``model``: The model used to translate the text.\n\n If only a single value is passed, then only a single\n dictionary will be returned.\n :raises: :class:`~exceptions.ValueError` if the number of\n values and translations differ.\n "
client = self.get_conn()
return client.translate(values=values, target_language=target_language, format_=format_, source_language=source_language, model=model)
| -4,404,416,656,389,028,400
|
Translate a string or list of strings.
See https://cloud.google.com/translate/docs/translating-text
:type values: str or list
:param values: String or list of strings to translate.
:type target_language: str
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:type format_: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type source_language: str or None
:param source_language: (Optional) The language of the text to
be translated.
:type model: str or None
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:rtype: str or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains the following keys (though not
all will be present in all cases)
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`~exceptions.ValueError` if the number of
values and translations differ.
|
airflow/contrib/hooks/gcp_translate_hook.py
|
translate
|
CatarinaSilva/airflow
|
python
|
def translate(self, values, target_language, format_=None, source_language=None, model=None):
"Translate a string or list of strings.\n\n See https://cloud.google.com/translate/docs/translating-text\n\n :type values: str or list\n :param values: String or list of strings to translate.\n :type target_language: str\n :param target_language: The language to translate results into. This\n is required by the API and defaults to\n the target language of the current instance.\n :type format_: str\n :param format_: (Optional) One of ``text`` or ``html``, to specify\n if the input text is plain text or HTML.\n :type source_language: str or None\n :param source_language: (Optional) The language of the text to\n be translated.\n :type model: str or None\n :param model: (Optional) The model used to translate the text, such\n as ``'base'`` or ``'nmt'``.\n :rtype: str or list\n :returns: A list of dictionaries for each queried value. Each\n dictionary typically contains three keys (though not\n all will be present in all cases)\n\n * ``detectedSourceLanguage``: The detected language (as an\n ISO 639-1 language code) of the text.\n\n * ``translatedText``: The translation of the text into the\n target language.\n\n * ``input``: The corresponding input value.\n\n * ``model``: The model used to translate the text.\n\n If only a single value is passed, then only a single\n dictionary will be returned.\n :raises: :class:`~exceptions.ValueError` if the number of\n values and translations differ.\n "
client = self.get_conn()
return client.translate(values=values, target_language=target_language, format_=format_, source_language=source_language, model=model)
|
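A minimal usage sketch of the translation call that the hook above forwards to. It uses the google-cloud-translate v2 client directly (the same client.translate call shown in the body); the credentials setup and the example output are assumptions for illustration.

from google.cloud import translate_v2  # pip install google-cloud-translate

# Assumes application-default credentials are configured in the environment.
client = translate_v2.Client()
result = client.translate(values='Hello world', target_language='de', format_='text')
# For a single input string a single dict is returned, e.g. (illustrative):
# {'translatedText': 'Hallo Welt', 'detectedSourceLanguage': 'en', 'input': 'Hello world'}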
@testing.requires_testing_data
def test_field_map_ctf():
'Test that field mapping can be done with CTF data.'
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events).average()
evoked.pick_channels(evoked.ch_names[:50])
make_field_map(evoked, trans=trans_fname, subject='sample', subjects_dir=subjects_dir)
| 3,898,756,881,485,746,000
|
Test that field mapping can be done with CTF data.
|
mne/forward/tests/test_field_interpolation.py
|
test_field_map_ctf
|
0reza/mne-python
|
python
|
@testing.requires_testing_data
def test_field_map_ctf():
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events).average()
evoked.pick_channels(evoked.ch_names[:50])
make_field_map(evoked, trans=trans_fname, subject='sample', subjects_dir=subjects_dir)
|
def test_legendre_val():
'Test Legendre polynomial (derivative) equivalence.'
rng = np.random.RandomState(0)
xs = np.linspace((- 1.0), 1.0, 1000)
n_terms = 100
vals_np = legendre.legvander(xs, (n_terms - 1))
for (nc, interp) in zip([100, 50], ['nearest', 'linear']):
(lut, n_fact) = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
lut_fun = interp1d(np.linspace((- 1), 1, lut.shape[0]), lut, interp, axis=0)
vals_i = lut_fun(xs)
assert_allclose(vals_np[:, 1:(vals_i.shape[1] + 1)], vals_i, rtol=0.01, atol=0.005)
ctheta = ((rng.rand(20, 30) * 2.0) - 1.0)
beta = (rng.rand(20, 30) * 0.8)
c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
c1.shape = beta.shape
n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
coeffs = np.zeros(((n_terms,) + beta.shape))
coeffs[1:] = (((np.cumprod(([beta] * (n_terms - 1)), axis=0) * ((2.0 * n) + 1.0)) * ((2.0 * n) + 1.0)) / n)
c2 = np.empty((20, 30))
for ci1 in range(20):
for ci2 in range(30):
c2[(ci1, ci2)] = legendre.legval(ctheta[(ci1, ci2)], coeffs[:, ci1, ci2])
assert_allclose(c1, c2, 0.01, 0.001)
ctheta = ((rng.rand((20 * 30)) * 2.0) - 1.0)
beta = (rng.rand((20 * 30)) * 0.8)
(lut, n_fact) = _get_legen_table('meg', n_coeff=10, force_calc=True)
fun = interp1d(np.linspace((- 1), 1, lut.shape[0]), lut, 'nearest', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
(lut, n_fact) = _get_legen_table('meg', n_coeff=20, force_calc=True)
fun = interp1d(np.linspace((- 1), 1, lut.shape[0]), lut, 'linear', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
| 4,881,300,242,660,246,000
|
Test Legendre polynomial (derivative) equivalence.
|
mne/forward/tests/test_field_interpolation.py
|
test_legendre_val
|
0reza/mne-python
|
python
|
def test_legendre_val():
rng = np.random.RandomState(0)
xs = np.linspace((- 1.0), 1.0, 1000)
n_terms = 100
vals_np = legendre.legvander(xs, (n_terms - 1))
for (nc, interp) in zip([100, 50], ['nearest', 'linear']):
(lut, n_fact) = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
lut_fun = interp1d(np.linspace((- 1), 1, lut.shape[0]), lut, interp, axis=0)
vals_i = lut_fun(xs)
assert_allclose(vals_np[:, 1:(vals_i.shape[1] + 1)], vals_i, rtol=0.01, atol=0.005)
ctheta = ((rng.rand(20, 30) * 2.0) - 1.0)
beta = (rng.rand(20, 30) * 0.8)
c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
c1.shape = beta.shape
n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
coeffs = np.zeros(((n_terms,) + beta.shape))
coeffs[1:] = (((np.cumprod(([beta] * (n_terms - 1)), axis=0) * ((2.0 * n) + 1.0)) * ((2.0 * n) + 1.0)) / n)
c2 = np.empty((20, 30))
for ci1 in range(20):
for ci2 in range(30):
c2[(ci1, ci2)] = legendre.legval(ctheta[(ci1, ci2)], coeffs[:, ci1, ci2])
assert_allclose(c1, c2, 0.01, 0.001)
ctheta = ((rng.rand((20 * 30)) * 2.0) - 1.0)
beta = (rng.rand((20 * 30)) * 0.8)
(lut, n_fact) = _get_legen_table('meg', n_coeff=10, force_calc=True)
fun = interp1d(np.linspace((- 1), 1, lut.shape[0]), lut, 'nearest', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
(lut, n_fact) = _get_legen_table('meg', n_coeff=20, force_calc=True)
fun = interp1d(np.linspace((- 1), 1, lut.shape[0]), lut, 'linear', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
|
def test_legendre_table():
'Test Legendre table calculation.'
n = 10
for ch_type in ['eeg', 'meg']:
(lut1, n_fact1) = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
lut1 = lut1[:, :(n - 1)].copy()
n_fact1 = n_fact1[:(n - 1)].copy()
(lut2, n_fact2) = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
assert_allclose(lut1, lut2)
assert_allclose(n_fact1, n_fact2)
| -3,882,972,427,821,506,600
|
Test Legendre table calculation.
|
mne/forward/tests/test_field_interpolation.py
|
test_legendre_table
|
0reza/mne-python
|
python
|
def test_legendre_table():
n = 10
for ch_type in ['eeg', 'meg']:
(lut1, n_fact1) = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
lut1 = lut1[:, :(n - 1)].copy()
n_fact1 = n_fact1[:(n - 1)].copy()
(lut2, n_fact2) = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
assert_allclose(lut1, lut2)
assert_allclose(n_fact1, n_fact2)
|
@testing.requires_testing_data
def test_make_field_map_eeg():
'Test interpolation of EEG field onto head.'
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
evoked.info['bads'] = ['MEG 2443', 'EEG 053']
surf = get_head_surf('sample', subjects_dir=subjects_dir)
pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
evoked.pick_types(meg=False, eeg=True)
fmd = make_field_map(evoked, trans_fname, subject='sample', subjects_dir=subjects_dir)
pytest.raises(RuntimeError, make_field_map, evoked, None, subject='sample', subjects_dir=subjects_dir)
fmd = make_field_map(evoked, trans_fname, subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (642, 59))
assert (len(fmd[0]['ch_names']) == 59)
| -1,162,614,805,405,947,000
|
Test interpolation of EEG field onto head.
|
mne/forward/tests/test_field_interpolation.py
|
test_make_field_map_eeg
|
0reza/mne-python
|
python
|
@testing.requires_testing_data
def test_make_field_map_eeg():
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
evoked.info['bads'] = ['MEG 2443', 'EEG 053']
surf = get_head_surf('sample', subjects_dir=subjects_dir)
pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
evoked.pick_types(meg=False, eeg=True)
fmd = make_field_map(evoked, trans_fname, subject='sample', subjects_dir=subjects_dir)
pytest.raises(RuntimeError, make_field_map, evoked, None, subject='sample', subjects_dir=subjects_dir)
fmd = make_field_map(evoked, trans_fname, subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (642, 59))
assert (len(fmd[0]['ch_names']) == 59)
|
@testing.requires_testing_data
@pytest.mark.slowtest
def test_make_field_map_meg():
'Test interpolation of MEG field onto helmet | head.'
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
info = evoked.info
surf = get_meg_helmet_surf(info)
info['bads'] = info['ch_names'][:200]
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo')
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg', mode='foo')
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info, surf, 'meg')
nn = surf['nn']
del surf['nn']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['nn'] = nn
cf = surf['coord_frame']
del surf['coord_frame']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['coord_frame'] = cf
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, None, subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (304, 106))
assert (len(fmd[0]['ch_names']) == 106)
pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar')
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, trans_fname, meg_surf='head', subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (642, 106))
assert (len(fmd[0]['ch_names']) == 106)
pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar', subjects_dir=subjects_dir, trans=trans_fname)
| -6,780,296,683,290,353,000
|
Test interpolation of MEG field onto helmet | head.
|
mne/forward/tests/test_field_interpolation.py
|
test_make_field_map_meg
|
0reza/mne-python
|
python
|
@testing.requires_testing_data
@pytest.mark.slowtest
def test_make_field_map_meg():
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
info = evoked.info
surf = get_meg_helmet_surf(info)
info['bads'] = info['ch_names'][:200]
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo')
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg', mode='foo')
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info, surf, 'meg')
nn = surf['nn']
del surf['nn']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['nn'] = nn
cf = surf['coord_frame']
del surf['coord_frame']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['coord_frame'] = cf
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, None, subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (304, 106))
assert (len(fmd[0]['ch_names']) == 106)
pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar')
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, trans_fname, meg_surf='head', subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (642, 106))
assert (len(fmd[0]['ch_names']) == 106)
pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar', subjects_dir=subjects_dir, trans=trans_fname)
|
@testing.requires_testing_data
def test_make_field_map_meeg():
    'Test making an M/EEG field map onto helmet & head.'
evoked = read_evokeds(evoked_fname, baseline=((- 0.2), 0.0))[0]
picks = pick_types(evoked.info, meg=True, eeg=True)
picks = picks[::10]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
maps = make_field_map(evoked, trans_fname, subject='sample', subjects_dir=subjects_dir, verbose='debug')
assert_equal(maps[0]['data'].shape, (642, 6))
assert_equal(maps[1]['data'].shape, (304, 31))
maxs = (1.2, 2.0)
mins = ((- 0.8), (- 1.3))
assert_equal(len(maxs), len(maps))
for (map_, max_, min_) in zip(maps, maxs, mins):
assert_allclose(map_['data'].max(), max_, rtol=0.05)
assert_allclose(map_['data'].min(), min_, rtol=0.05)
assert_allclose(np.sqrt(np.sum((maps[0]['data'] ** 2))), 19.0903, atol=0.001, rtol=0.001)
assert_allclose(np.sqrt(np.sum((maps[1]['data'] ** 2))), 19.4748, atol=0.001, rtol=0.001)
| 5,279,945,788,715,939,000
|
Test making an M/EEG field map onto helmet & head.
|
mne/forward/tests/test_field_interpolation.py
|
test_make_field_map_meeg
|
0reza/mne-python
|
python
|
@testing.requires_testing_data
def test_make_field_map_meeg():
evoked = read_evokeds(evoked_fname, baseline=((- 0.2), 0.0))[0]
picks = pick_types(evoked.info, meg=True, eeg=True)
picks = picks[::10]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
maps = make_field_map(evoked, trans_fname, subject='sample', subjects_dir=subjects_dir, verbose='debug')
assert_equal(maps[0]['data'].shape, (642, 6))
assert_equal(maps[1]['data'].shape, (304, 31))
maxs = (1.2, 2.0)
mins = ((- 0.8), (- 1.3))
assert_equal(len(maxs), len(maps))
for (map_, max_, min_) in zip(maps, maxs, mins):
assert_allclose(map_['data'].max(), max_, rtol=0.05)
assert_allclose(map_['data'].min(), min_, rtol=0.05)
assert_allclose(np.sqrt(np.sum((maps[0]['data'] ** 2))), 19.0903, atol=0.001, rtol=0.001)
assert_allclose(np.sqrt(np.sum((maps[1]['data'] ** 2))), 19.4748, atol=0.001, rtol=0.001)
|
def _setup_args(info):
'Configure args for test_as_meg_type_evoked.'
coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
(int_rad, _, lut_fun, n_fact) = _setup_dots('fast', info, coils, 'meg')
my_origin = np.array([0.0, 0.0, 0.04])
args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin, ch_type='meg', lut=lut_fun, n_fact=n_fact)
return args_dict
| 7,179,594,168,567,113,000
|
Configure args for test_as_meg_type_evoked.
|
mne/forward/tests/test_field_interpolation.py
|
_setup_args
|
0reza/mne-python
|
python
|
def _setup_args(info):
coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
(int_rad, _, lut_fun, n_fact) = _setup_dots('fast', info, coils, 'meg')
my_origin = np.array([0.0, 0.0, 0.04])
args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin, ch_type='meg', lut=lut_fun, n_fact=n_fact)
return args_dict
|
@testing.requires_testing_data
def test_as_meg_type_evoked():
    'Test interpolation of data onto virtual channels.'
raw = read_raw_fif(raw_fname)
events = mne.find_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True, ecg=True, eog=True, include=['STI 014'], exclude='bads')
epochs = mne.Epochs(raw, events, picks=picks)
evoked = epochs.average()
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.as_type('meg')
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.copy().pick_types(meg='grad').as_type('meg')
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert all((ch.endswith('_v') for ch in virt_evoked.info['ch_names']))
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
(info_from, info_to) = (evoked_from.info, evoked_to.info)
(args1, args2) = (_setup_args(info_from), _setup_args(info_to))
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
evoked = evoked.pick_channels(ch_names=ch_names[:10]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert (np.corrcoef(data1, data2)[(0, 1)] > 0.95)
virt_epochs = epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])
virt_epochs.info.normalize_proj()
virt_epochs = virt_epochs.as_type('mag')
assert all((ch.endswith('_v') for ch in virt_epochs.info['ch_names']))
assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)
| 5,587,401,729,087,093,000
|
Test interpolation of data onto virtual channels.
|
mne/forward/tests/test_field_interpolation.py
|
test_as_meg_type_evoked
|
0reza/mne-python
|
python
|
@testing.requires_testing_data
def test_as_meg_type_evoked():
raw = read_raw_fif(raw_fname)
events = mne.find_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True, ecg=True, eog=True, include=['STI 014'], exclude='bads')
epochs = mne.Epochs(raw, events, picks=picks)
evoked = epochs.average()
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.as_type('meg')
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.copy().pick_types(meg='grad').as_type('meg')
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert all((ch.endswith('_v') for ch in virt_evoked.info['ch_names']))
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
(info_from, info_to) = (evoked_from.info, evoked_to.info)
(args1, args2) = (_setup_args(info_from), _setup_args(info_to))
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
evoked = evoked.pick_channels(ch_names=ch_names[:10]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert (np.corrcoef(data1, data2)[(0, 1)] > 0.95)
virt_epochs = epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])
virt_epochs.info.normalize_proj()
virt_epochs = virt_epochs.as_type('mag')
assert all((ch.endswith('_v') for ch in virt_epochs.info['ch_names']))
assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)
|
def dist_kl(p: Prob, q: Prob):
'Kullback-Leibler divergence between two probability distributions.'
    kl_div = (p.p * (np.log((p.p + (p.p == 0))) - np.log((q.p + (p.p == 0)))))
return np.sum(kl_div)
| -8,010,159,052,281,958,000
|
Kullback-Leibler divergence between two probability distributions.
|
inferlo/generic/libdai_bp.py
|
dist_kl
|
InferLO/inferlo
|
python
|
def dist_kl(p: Prob, q: Prob):
    kl_div = (p.p * (np.log((p.p + (p.p == 0))) - np.log((q.p + (p.p == 0)))))
return np.sum(kl_div)
|
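The guard terms in dist_kl keep the computation finite where p has zeros; a self-contained numpy sketch of the same pattern with a small worked value, assuming plain arrays in place of Prob:

import numpy as np

def kl(p, q):
    # Where p == 0 the term contributes nothing, so shift both log arguments
    # by the indicator (p == 0) to avoid log(0) at those positions.
    zero = (p == 0)
    return np.sum(p * (np.log(p + zero) - np.log(q + zero)))

print(kl(np.array([0.5, 0.5, 0.0]), np.array([0.25, 0.5, 0.25])))  # 0.5 * ln(2) ~= 0.3466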
def dist_linf(p: Prob, q: Prob):
'Distance between two probability distributions in L_infinity norm.'
return np.max(np.abs((p.p - q.p)))
| 5,676,502,167,310,320,000
|
Distance between two probability distributions in L_infinity norm.
|
inferlo/generic/libdai_bp.py
|
dist_linf
|
InferLO/inferlo
|
python
|
def dist_linf(p: Prob, q: Prob):
return np.max(np.abs((p.p - q.p)))
|
@staticmethod
def uniform(n):
    'Creates a uniform probability distribution.'
return Prob.same_value(n, (1.0 / n))
| -4,663,781,209,296,906,000
|
Creates a uniform probability distribution.
|
inferlo/generic/libdai_bp.py
|
uniform
|
InferLO/inferlo
|
python
|
@staticmethod
def uniform(n):
return Prob.same_value(n, (1.0 / n))
|
@staticmethod
def same_value(n: int, val: float):
'Creates vector filled with the same value.'
return Prob((np.ones(n, dtype=np.float64) * val))
| 681,741,583,617,377,800
|
Creates vector filled with the same value.
|
inferlo/generic/libdai_bp.py
|
same_value
|
InferLO/inferlo
|
python
|
@staticmethod
def same_value(n: int, val: float):
return Prob((np.ones(n, dtype=np.float64) * val))
|
def fill(self, x):
'Sets all entries to x.'
self.p = (np.ones_like(self.p) * x)
| 1,609,897,422,729,735,000
|
Sets all entries to x.
|
inferlo/generic/libdai_bp.py
|
fill
|
InferLO/inferlo
|
python
|
def fill(self, x):
self.p = (np.ones_like(self.p) * x)
|
def clone(self):
'Makes a copy.'
return Prob(np.array(self.p))
| 8,396,041,533,749,117,000
|
Makes a copy.
|
inferlo/generic/libdai_bp.py
|
clone
|
InferLO/inferlo
|
python
|
def clone(self):
return Prob(np.array(self.p))
|
def normalize(self):
'Normalize distribution.'
self.p /= np.sum(self.p)
| 6,639,159,798,546,026,000
|
Normalize distribution.
|
inferlo/generic/libdai_bp.py
|
normalize
|
InferLO/inferlo
|
python
|
def normalize(self):
self.p /= np.sum(self.p)
|
def entropy(self) -> float:
'Calculate entropy of the distribution.'
return (- np.sum((self.p * np.log(self.p))))
| -1,429,585,937,468,360,000
|
Calculate entropy of the distribution.
|
inferlo/generic/libdai_bp.py
|
entropy
|
InferLO/inferlo
|
python
|
def entropy(self) -> float:
return (- np.sum((self.p * np.log(self.p))))
|
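The entropy here is in nats, and as written it assumes strictly positive entries (a zero entry would produce 0 * log(0) = nan). A quick check against the uniform case:

import numpy as np

p = np.ones(4) / 4.0
print(-np.sum(p * np.log(p)))  # ln(4) ~= 1.3863 nats, the maximum for 4 outcomes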
@staticmethod
def uniform(model: GraphModel, var_idx: List[int]):
'Creates factor defining uniform distribution.'
total_domain_size = 1
for i in var_idx:
total_domain_size *= model.get_variable(i).domain.size()
return LDFactor(model, var_idx, Prob.uniform(total_domain_size))
| -8,068,446,661,493,057,000
|
Creates factor defining uniform distribution.
|
inferlo/generic/libdai_bp.py
|
uniform
|
InferLO/inferlo
|
python
|
@staticmethod
def uniform(model: GraphModel, var_idx: List[int]):
total_domain_size = 1
for i in var_idx:
total_domain_size *= model.get_variable(i).domain.size()
return LDFactor(model, var_idx, Prob.uniform(total_domain_size))
|
@staticmethod
def from_inferlo_factor(f: DiscreteFactor):
'Converts inferlo.DiscreteFactor to LDFactor.'
rev_perm = list(range(len(f.var_idx)))[::(- 1)]
prob = f.values.transpose(rev_perm).reshape((- 1))
return LDFactor(f.model, f.var_idx, Prob(prob))
| -7,319,385,636,117,433,000
|
Converts inferlo.DiscreteFactor to LDFactor.
|
inferlo/generic/libdai_bp.py
|
from_inferlo_factor
|
InferLO/inferlo
|
python
|
@staticmethod
def from_inferlo_factor(f: DiscreteFactor):
rev_perm = list(range(len(f.var_idx)))[::(- 1)]
prob = f.values.transpose(rev_perm).reshape((- 1))
return LDFactor(f.model, f.var_idx, Prob(prob))
|
def to_inferlo_factor(self) -> DiscreteFactor:
'Converts LDFactor to inferlo.DiscreteFactor.'
sizes = [self.model.get_variable(i).domain.size() for i in self.var_idx[::(- 1)]]
libdai_tensor = self.p.p.reshape(sizes)
rev_perm = list(range(len(self.var_idx)))[::(- 1)]
inferlo_tensor = libdai_tensor.transpose(rev_perm)
return DiscreteFactor(self.model, self.var_idx, inferlo_tensor)
| -7,055,429,566,873,699,000
|
Converts LDFactor to inferlo.DiscreteFactor.
|
inferlo/generic/libdai_bp.py
|
to_inferlo_factor
|
InferLO/inferlo
|
python
|
def to_inferlo_factor(self) -> DiscreteFactor:
sizes = [self.model.get_variable(i).domain.size() for i in self.var_idx[::(- 1)]]
libdai_tensor = self.p.p.reshape(sizes)
rev_perm = list(range(len(self.var_idx)))[::(- 1)]
inferlo_tensor = libdai_tensor.transpose(rev_perm)
return DiscreteFactor(self.model, self.var_idx, inferlo_tensor)
|
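The two converters above only reverse the axis order between conventions: an inferlo tensor is indexed with the last variable fastest (C order), while the libDAI-style flat vector stores the first variable in var_idx fastest. A numpy round trip, assuming two variables with domain sizes 2 and 3:

import numpy as np

t = np.arange(6).reshape(2, 3)             # inferlo-style tensor, axes (var0, var1)
flat = t.transpose(1, 0).reshape(-1)       # from_inferlo_factor: reverse axes, flatten
back = flat.reshape(3, 2).transpose(1, 0)  # to_inferlo_factor: reversed sizes, reverse back
assert (back == t).all()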
def combine_with_factor(self, other: LDFactor, func: Callable[([float, float], float)]):
'Applies binary function to two factors.'
for i in other.var_idx:
assert (i in self.var_idx)
for idx in range(len(self.p.p)):
j = other._encode_value_index(self._decode_value_index(idx))
self.p.p[idx] = func(self.p.p[idx], other.p.p[j])
return self
| -853,957,249,262,632,400
|
Applies binary function to two factors.
|
inferlo/generic/libdai_bp.py
|
combine_with_factor
|
InferLO/inferlo
|
python
|
def combine_with_factor(self, other: LDFactor, func: Callable[([float, float], float)]):
for i in other.var_idx:
assert (i in self.var_idx)
for idx in range(len(self.p.p)):
j = other._encode_value_index(self._decode_value_index(idx))
self.p.p[idx] = func(self.p.p[idx], other.p.p[j])
return self
|
def marginal(self, new_var_idx, normed=True) -> LDFactor:
'Sums factor over some variables.'
result = self.to_inferlo_factor().marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
| -300,902,764,208,707,500
|
Sums factor over some variables.
|
inferlo/generic/libdai_bp.py
|
marginal
|
InferLO/inferlo
|
python
|
def marginal(self, new_var_idx, normed=True) -> LDFactor:
result = self.to_inferlo_factor().marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
|
def max_marginal(self, new_var_idx, normed=True) -> LDFactor:
    'Eliminates certain variables by finding the maximum.'
result = self.to_inferlo_factor().max_marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
| 1,158,098,587,500,319,000
|
Eliminates certain variables by finding the maximum.
|
inferlo/generic/libdai_bp.py
|
max_marginal
|
InferLO/inferlo
|
python
|
def max_marginal(self, new_var_idx, normed=True) -> LDFactor:
result = self.to_inferlo_factor().max_marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
|
def clone(self):
'Makes a copy of this factor.'
return LDFactor(self.model, self.var_idx, self.p.clone())
| -1,412,512,557,047,017,000
|
Makes a copy of this factor.
|
inferlo/generic/libdai_bp.py
|
clone
|
InferLO/inferlo
|
python
|
def clone(self):
return LDFactor(self.model, self.var_idx, self.p.clone())
|
def _decode_value_index(self, idx):
'Returns dict from variable id to variable value.'
ans = dict()
for var_id in self.var_idx:
size = self.model.get_variable(var_id).domain.size()
ans[var_id] = (idx % size)
idx //= size
return ans
| -4,562,561,243,723,303,400
|
Returns dict from variable id to variable value.
|
inferlo/generic/libdai_bp.py
|
_decode_value_index
|
InferLO/inferlo
|
python
|
def _decode_value_index(self, idx):
ans = dict()
for var_id in self.var_idx:
size = self.model.get_variable(var_id).domain.size()
ans[var_id] = (idx % size)
idx //= size
return ans
|
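_decode_value_index is a mixed-radix decode in which the first variable in var_idx changes fastest. A standalone sketch of the same loop with assumed domain sizes {0: 2, 1: 3}:

sizes = {0: 2, 1: 3}  # assumed variable domain sizes, in var_idx order
idx, ans = 5, {}
for var_id, size in sizes.items():
    ans[var_id] = idx % size  # value of this variable
    idx //= size              # strip it off and move to the next radix
print(ans)  # {0: 1, 1: 2}: flat index 5 decodes to var 0 = 1, var 1 = 2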
@staticmethod
def infer(model, options=None):
    'Runs inference BP algorithm for given model.\n\n        Supports all options which libdai::BP supports. Refer to libDAI\n        documentation for options description.\n        '
if (options is None):
options = {'tol': 1e-09, 'logdomain': 0, 'updates': 'SEQRND'}
inf_alg = BP(model, options)
inf_alg.init()
inf_alg.run()
return InferenceResult(inf_alg.log_z(), inf_alg.marg_prob())
| -3,472,154,361,382,406,700
|
Runs inference BP algorithm for given model.
Supports all options which libdai::BP supports. Refer to libDAI
documentation for options description.
|
inferlo/generic/libdai_bp.py
|
infer
|
InferLO/inferlo
|
python
|
@staticmethod
def infer(model, options=None):
    'Runs inference BP algorithm for given model.\n\n        Supports all options which libdai::BP supports. Refer to libDAI\n        documentation for options description.\n        '
if (options is None):
options = {'tol': 1e-09, 'logdomain': 0, 'updates': 'SEQRND'}
inf_alg = BP(model, options)
inf_alg.init()
inf_alg.run()
return InferenceResult(inf_alg.log_z(), inf_alg.marg_prob())
|
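A minimal sketch of invoking the wrapper above; the model construction is assumed (any inferlo GraphModel), and only option keys that appear in the code are set:

# `model` is assumed to be an inferlo GraphModel built elsewhere.
result = BP.infer(model, options={'tol': 1e-09, 'logdomain': 0, 'updates': 'PARALL'})
# The returned InferenceResult wraps the log partition function and the
# per-variable marginals computed by log_z() and marg_prob() below.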
def _construct(self):
'Helper function for constructors.'
self._edges = []
for i in range(self.nrVars):
self._edges.append([])
for _ in self.nbV[i]:
size = self._var_size(i)
new_ep = EdgeProp(index=None, message=Prob.uniform(size), new_message=Prob.uniform(size), residual=0.0)
self._edges[i].append(new_ep)
self._oldBeliefsV = []
for i in range(self.nrVars):
self._oldBeliefsV.append(LDFactor.uniform(self.model, [i]))
self._old_beliefs_f = []
for ii in range(self.nrFactors):
self._old_beliefs_f.append(LDFactor.uniform(self.model, self.factors[ii].var_idx))
self._update_seq = []
for ii in range(self.nrFactors):
for i in self.nbF[ii]:
self._update_seq.append((i.node, i.dual))
| 5,862,436,295,012,486,000
|
Helper function for constructors.
|
inferlo/generic/libdai_bp.py
|
_construct
|
InferLO/inferlo
|
python
|
def _construct(self):
self._edges = []
for i in range(self.nrVars):
self._edges.append([])
for _ in self.nbV[i]:
size = self._var_size(i)
new_ep = EdgeProp(index=None, message=Prob.uniform(size), new_message=Prob.uniform(size), residual=0.0)
self._edges[i].append(new_ep)
self._oldBeliefsV = []
for i in range(self.nrVars):
self._oldBeliefsV.append(LDFactor.uniform(self.model, [i]))
self._old_beliefs_f = []
for ii in range(self.nrFactors):
self._old_beliefs_f.append(LDFactor.uniform(self.model, self.factors[ii].var_idx))
self._update_seq = []
for ii in range(self.nrFactors):
for i in self.nbF[ii]:
self._update_seq.append((i.node, i.dual))
|
def init(self):
    'Initializes messages with default values.'
c = (0.0 if self.logdomain else 1.0)
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._edges[i][ii.iter].message.fill(c)
self._edges[i][ii.iter].new_message.fill(c)
if (self.updates == 'SEQMAX'):
self._update_residual(i, ii.iter, 0.0)
self._iters = 0
| 4,076,126,271,826,050,600
|
Initializes messages with default values.
|
inferlo/generic/libdai_bp.py
|
init
|
InferLO/inferlo
|
python
|
def init(self):
c = (0.0 if self.logdomain else 1.0)
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._edges[i][ii.iter].message.fill(c)
self._edges[i][ii.iter].new_message.fill(c)
if (self.updates == 'SEQMAX'):
self._update_residual(i, ii.iter, 0.0)
self._iters = 0
|
def find_max_residual(self):
'Find max residual.'
max_r = (- np.inf)
best_edge = None
for i in range(self.nrVars):
for _I in range(len(self.nbV[i])):
if (self._edges[i][_I].residual > max_r):
max_r = self._edges[i][_I].residual
best_edge = (i, _I)
return best_edge
| -6,233,666,094,231,453,000
|
Find max residual.
|
inferlo/generic/libdai_bp.py
|
find_max_residual
|
InferLO/inferlo
|
python
|
def find_max_residual(self):
max_r = (- np.inf)
best_edge = None
for i in range(self.nrVars):
for _I in range(len(self.nbV[i])):
if (self._edges[i][_I].residual > max_r):
max_r = self._edges[i][_I].residual
best_edge = (i, _I)
return best_edge
|
def _calc_incoming_message_product(self, ii: int, without_i: bool, i: int) -> Prob:
    'Calculate the product of factor I and the incoming messages.\n\n        If without_i == True, the message coming from variable i is omitted\n        from the product.\n\n        This function is used by calc_new_message and calc_belief_f.\n        '
f_prod = self.factors[ii].clone()
if self.logdomain:
f_prod.p.p = np.log(f_prod.p.p)
for j in self.nbF[ii]:
if (without_i and (j.node == i)):
continue
size = self._var_size(j.node)
default_val = (0.0 if self.logdomain else 1.0)
prod_j = Prob.same_value(size, default_val)
for J in self.nbV[j.node]:
if (J.node != ii):
if self.logdomain:
prod_j += self._edges[j.node][J.iter].message
else:
prod_j *= self._edges[j.node][J.iter].message
if self.logdomain:
f_prod += LDFactor(self.model, [j.node], prod_j)
else:
f_prod *= LDFactor(self.model, [j.node], prod_j)
return f_prod.p
| -888,266,374,908,817,400
|
Calculate the product of factor I and the incoming messages.
If without_i == True, the message coming from variable i is omitted
from the product.
This function is used by calc_new_message and calc_belief_f.
|
inferlo/generic/libdai_bp.py
|
_calc_incoming_message_product
|
InferLO/inferlo
|
python
|
def _calc_incoming_message_product(self, ii: int, without_i: bool, i: int) -> Prob:
    'Calculate the product of factor I and the incoming messages.\n\n        If without_i == True, the message coming from variable i is omitted\n        from the product.\n\n        This function is used by calc_new_message and calc_belief_f.\n        '
f_prod = self.factors[ii].clone()
if self.logdomain:
f_prod.p.p = np.log(f_prod.p.p)
for j in self.nbF[ii]:
if (without_i and (j.node == i)):
continue
size = self._var_size(j.node)
default_val = (0.0 if self.logdomain else 1.0)
prod_j = Prob.same_value(size, default_val)
for J in self.nbV[j.node]:
if (J.node != ii):
if self.logdomain:
prod_j += self._edges[j.node][J.iter].message
else:
prod_j *= self._edges[j.node][J.iter].message
if self.logdomain:
f_prod += LDFactor(self.model, [j.node], prod_j)
else:
f_prod *= LDFactor(self.model, [j.node], prod_j)
return f_prod.p
|
def run(self):
'Runs BP algorithm.'
tic = time.time()
max_diff = np.inf
while ((self._iters < self.maxiter) and (max_diff > self.tol) and ((time.time() - tic) < self.maxtime)):
if (self.updates == 'SEQMAX'):
if (self._iters == 0):
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
for _ in range(len(self._update_seq)):
(i, _I) = self.find_max_residual()
self._update_message(i, _I)
for J in self.nbV[i]:
if (J.iter != _I):
for j in self.nbF[J.node]:
_J = j.dual
if (j != i):
self._calc_new_message(j.node, _J)
elif (self.updates == 'PARALL'):
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._update_message(i, ii.iter)
else:
if (self.updates == 'SEQRND'):
random.shuffle(self._update_seq)
for e in self._update_seq:
self._calc_new_message(e[0], e[1])
self._update_message(e[0], e[1])
max_diff = (- np.inf)
for i in range(self.nrVars):
b = self._belief_v(i).clone()
max_diff = max(max_diff, dist_linf(b.p, self._oldBeliefsV[i].p))
self._oldBeliefsV[i] = b
for ii in range(self.nrFactors):
b = self._belief_f(ii).clone()
max_diff = max(max_diff, dist_linf(b.p, self._old_beliefs_f[ii].p))
self._old_beliefs_f[ii] = b
self._iters += 1
if (max_diff > self._maxdiff):
self._maxdiff = max_diff
return max_diff
| -772,364,498,801,806,700
|
Runs BP algorithm.
|
inferlo/generic/libdai_bp.py
|
run
|
InferLO/inferlo
|
python
|
def run(self):
tic = time.time()
max_diff = np.inf
while ((self._iters < self.maxiter) and (max_diff > self.tol) and ((time.time() - tic) < self.maxtime)):
if (self.updates == 'SEQMAX'):
if (self._iters == 0):
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
for _ in range(len(self._update_seq)):
(i, _I) = self.find_max_residual()
self._update_message(i, _I)
for J in self.nbV[i]:
if (J.iter != _I):
for j in self.nbF[J.node]:
_J = j.dual
if (j != i):
self._calc_new_message(j.node, _J)
elif (self.updates == 'PARALL'):
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._update_message(i, ii.iter)
else:
if (self.updates == 'SEQRND'):
random.shuffle(self._update_seq)
for e in self._update_seq:
self._calc_new_message(e[0], e[1])
self._update_message(e[0], e[1])
max_diff = (- np.inf)
for i in range(self.nrVars):
b = self._belief_v(i).clone()
max_diff = max(max_diff, dist_linf(b.p, self._oldBeliefsV[i].p))
self._oldBeliefsV[i] = b
for ii in range(self.nrFactors):
b = self._belief_f(ii).clone()
max_diff = max(max_diff, dist_linf(b.p, self._old_beliefs_f[ii].p))
self._old_beliefs_f[ii] = b
self._iters += 1
if (max_diff > self._maxdiff):
self._maxdiff = max_diff
return max_diff
|
def log_z(self) -> float:
'Calculates logarithm of the partition function.'
ans = 0.0
for i in range(self.nrVars):
ans += ((1.0 - len(self.nbV[i])) * self._belief_v(i).p.entropy())
for ii in range(self.nrFactors):
ans -= dist_kl(self._belief_f(ii).p, self.factors[ii].p)
return ans
| -1,830,153,131,984,670,200
|
Calculates logarithm of the partition function.
|
inferlo/generic/libdai_bp.py
|
log_z
|
InferLO/inferlo
|
python
|
def log_z(self) -> float:
ans = 0.0
for i in range(self.nrVars):
ans += ((1.0 - len(self.nbV[i])) * self._belief_v(i).p.entropy())
for ii in range(self.nrFactors):
ans -= dist_kl(self._belief_f(ii).p, self.factors[ii].p)
return ans
|
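In equation form this is the Bethe approximation to the log partition function, with d_i = len(self.nbV[i]) the degree of variable i, b the beliefs, and f the factors:

\log Z \;\approx\; \sum_{i} (1 - d_i)\, H(b_i) \;-\; \sum_{I} \mathrm{KL}\!\left(b_I \,\middle\|\, f_I\right)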
def marg_prob(self) -> np.ndarray:
'Calculates marginal probabilities.'
max_domain_size = np.max([self._var_size(i) for i in range(self.nrVars)])
ans = np.zeros((self.nrVars, max_domain_size), dtype=np.float64)
for var_id in range(self.nrVars):
ans[var_id, 0:self._var_size(var_id)] = self._belief_v(var_id).p.p
return ans
| 4,195,681,131,789,335,000
|
Calculates marginal probabilities.
|
inferlo/generic/libdai_bp.py
|
marg_prob
|
InferLO/inferlo
|
python
|
def marg_prob(self) -> np.ndarray:
max_domain_size = np.max([self._var_size(i) for i in range(self.nrVars)])
ans = np.zeros((self.nrVars, max_domain_size), dtype=np.float64)
for var_id in range(self.nrVars):
ans[var_id, 0:self._var_size(var_id)] = self._belief_v(var_id).p.p
return ans
|
def __init__(self, storage, move_scheme=None, sample_set=None, initialize=True):
'\n Parameters\n ----------\n storage : :class:`openpathsampling.storage.Storage`\n the storage where all results should be stored in\n move_scheme : :class:`openpathsampling.MoveScheme`\n the move scheme used for the pathsampling cycle\n sample_set : :class:`openpathsampling.SampleSet`\n the initial SampleSet for the Simulator\n initialize : bool\n if `False` the new PathSimulator will continue at the step and\n not create a new SampleSet object to cut the connection to previous\n steps\n '
super(PathSampling, self).__init__(storage)
self.move_scheme = move_scheme
if (move_scheme is not None):
self.root_mover = move_scheme.move_decision_tree()
self._mover = paths.PathSimulatorMover(self.root_mover, self)
else:
self.root_mover = None
self._mover = None
initialization_logging(init_log, self, ['move_scheme', 'sample_set'])
self.live_visualizer = None
self.status_update_frequency = 1
if initialize:
samples = []
if (sample_set is not None):
for sample in sample_set:
samples.append(sample.copy_reset())
self.sample_set = paths.SampleSet(samples)
mcstep = MCStep(simulation=self, mccycle=self.step, active=self.sample_set, change=paths.AcceptedSampleMoveChange(self.sample_set.samples))
self._current_step = mcstep
else:
self.sample_set = sample_set
self._current_step = None
self.root = self.sample_set
if (self.storage is not None):
template_trajectory = self.sample_set.samples[0].trajectory
self.storage.save(template_trajectory)
self.storage.save([self.move_scheme, self.root_mover, self._mover])
self.save_current_step()
| 8,646,892,375,445,471,000
|
Parameters
----------
storage : :class:`openpathsampling.storage.Storage`
the storage where all results should be stored in
move_scheme : :class:`openpathsampling.MoveScheme`
the move scheme used for the pathsampling cycle
sample_set : :class:`openpathsampling.SampleSet`
the initial SampleSet for the Simulator
initialize : bool
if `False` the new PathSimulator will continue at the step and
not create a new SampleSet object to cut the connection to previous
steps
|
openpathsampling/pathsimulators/path_sampling.py
|
__init__
|
bolhuis/openpathsampling
|
python
|
def __init__(self, storage, move_scheme=None, sample_set=None, initialize=True):
'\n Parameters\n ----------\n storage : :class:`openpathsampling.storage.Storage`\n the storage where all results should be stored in\n move_scheme : :class:`openpathsampling.MoveScheme`\n the move scheme used for the pathsampling cycle\n sample_set : :class:`openpathsampling.SampleSet`\n the initial SampleSet for the Simulator\n initialize : bool\n if `False` the new PathSimulator will continue at the step and\n not create a new SampleSet object to cut the connection to previous\n steps\n '
super(PathSampling, self).__init__(storage)
self.move_scheme = move_scheme
if (move_scheme is not None):
self.root_mover = move_scheme.move_decision_tree()
self._mover = paths.PathSimulatorMover(self.root_mover, self)
else:
self.root_mover = None
self._mover = None
initialization_logging(init_log, self, ['move_scheme', 'sample_set'])
self.live_visualizer = None
self.status_update_frequency = 1
if initialize:
samples = []
if (sample_set is not None):
for sample in sample_set:
samples.append(sample.copy_reset())
self.sample_set = paths.SampleSet(samples)
mcstep = MCStep(simulation=self, mccycle=self.step, active=self.sample_set, change=paths.AcceptedSampleMoveChange(self.sample_set.samples))
self._current_step = mcstep
else:
self.sample_set = sample_set
self._current_step = None
self.root = self.sample_set
if (self.storage is not None):
template_trajectory = self.sample_set.samples[0].trajectory
self.storage.save(template_trajectory)
self.storage.save([self.move_scheme, self.root_mover, self._mover])
self.save_current_step()
|
def save_current_step(self):
'\n Save the current step to the storage\n\n '
if ((self.storage is not None) and (self._current_step is not None)):
try:
self.storage.stash(self._current_step)
except AttributeError:
self.storage.steps.save(self._current_step)
| -6,005,775,065,783,409,000
|
Save the current step to the storage
|
openpathsampling/pathsimulators/path_sampling.py
|
save_current_step
|
bolhuis/openpathsampling
|
python
|
def save_current_step(self):
'\n \n\n '
if ((self.storage is not None) and (self._current_step is not None)):
try:
self.storage.stash(self._current_step)
except AttributeError:
self.storage.steps.save(self._current_step)
|
@classmethod
def from_step(cls, storage, step, initialize=True):
'\n\n Parameters\n ----------\n storage : :class:`openpathsampling.storage.Storage`\n the storage to be used to hold the simulation results\n step : :class:`openpathsampling.MCStep`\n the step used to fill the initial parameters\n initialize : bool\n if `False` the new PathSimulator will continue at the given step and\n not create a new SampleSet object to cut the connection to previous\n steps.\n\n Returns\n -------\n :class:`openpathsampling.PathSampling`\n the new simulator object\n '
obj = cls(storage, step.simulation.move_scheme, step.sample_set, initialize=initialize)
return obj
| 4,474,719,290,868,421,600
|
Parameters
----------
storage : :class:`openpathsampling.storage.Storage`
the storage to be used to hold the simulation results
step : :class:`openpathsampling.MCStep`
the step used to fill the initial parameters
initialize : bool
if `False` the new PathSimulator will continue at the given step and
not create a new SampleSet object to cut the connection to previous
steps.
Returns
-------
:class:`openpathsampling.PathSampling`
the new simulator object
|
openpathsampling/pathsimulators/path_sampling.py
|
from_step
|
bolhuis/openpathsampling
|
python
|
@classmethod
def from_step(cls, storage, step, initialize=True):
'\n\n Parameters\n ----------\n storage : :class:`openpathsampling.storage.Storage`\n the storage to be used to hold the simulation results\n step : :class:`openpathsampling.MCStep`\n the step used to fill the initial parameters\n initialize : bool\n if `False` the new PathSimulator will continue at the given step and\n not create a new SampleSet object to cut the connection to previous\n steps.\n\n Returns\n -------\n :class:`openpathsampling.PathSampling`\n the new simulator object\n '
obj = cls(storage, step.simulation.move_scheme, step.sample_set, initialize=initialize)
return obj
|
def restart_at_step(self, step, storage=None):
    '\n        Continue with a loaded pathsampling at a given step\n\n        Notes\n        -----\n        You can only continue from a step that is compatible in the sense\n        that it was previously generated from the pathsampling instance.\n\n        If you want to switch the move scheme you need to create a new\n        pathsampling instance. You can do so with the constructor or using\n        the classmethod `from_step` which simplifies the setup process\n\n        Parameters\n        ----------\n        step : :class:`MCStep`\n            the step to be continued from. You are always free to choose any step\n            which can be used to fork a simulation but for analysis you may\n            only use one path of steps.\n        storage : :class:`Storage`\n            If given this will change the storage used to store the generated\n            steps\n\n        '
if (step.simulation is not self):
raise RuntimeWarning('Trying to continue from other step. Please use the `.from_step` method to create a new PathSampling object instead.')
if (storage is not None):
self.storage = storage
self.step = step.mccycle
self.sample_set = step.active
self.root = step.simulation.root
self._current_step = step
| -583,609,655,033,610,800
|
Continue with a loaded pathsampling at a given step
Notes
-----
You can only continue from a step that is compatible in the sense
that it was previously generated from the pathsampling instance.
If you want to switch the move scheme you need to create a new
pathsampling instance. You can do so with the constructor or using
the classmethod `from_step` which simplifies the setup process
Parameters
----------
step : :class:`MCStep`
    the step to be continued from. You are always free to choose any step
which can be used to fork a simulation but for analysis you may
only use one path of steps.
storage : :class:`Storage`
If given this will change the storage used to store the generated
steps
|
openpathsampling/pathsimulators/path_sampling.py
|
restart_at_step
|
bolhuis/openpathsampling
|
python
|
def restart_at_step(self, step, storage=None):
    '\n        Continue with a loaded pathsampling at a given step\n\n        Notes\n        -----\n        You can only continue from a step that is compatible in the sense\n        that it was previously generated from the pathsampling instance.\n\n        If you want to switch the move scheme you need to create a new\n        pathsampling instance. You can do so with the constructor or using\n        the classmethod `from_step` which simplifies the setup process\n\n        Parameters\n        ----------\n        step : :class:`MCStep`\n            the step to be continued from. You are always free to choose any step\n            which can be used to fork a simulation but for analysis you may\n            only use one path of steps.\n        storage : :class:`Storage`\n            If given this will change the storage used to store the generated\n            steps\n\n        '
if (step.simulation is not self):
raise RuntimeWarning('Trying to continue from other step. Please use the `.from_step` method to create a new PathSampling object instead.')
if (storage is not None):
self.storage = storage
self.step = step.mccycle
self.sample_set = step.active
self.root = step.simulation.root
self._current_step = step
|
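A hedged sketch of continuing a stored run; the steps-store indexing is an assumption about the surrounding openpathsampling API, while restart_at_step and run(n_steps) appear in this file:

# Assumed usage: pick up the last stored step and continue that simulation.
step = storage.steps[-1]        # assumption: the steps store supports indexing
sampler = step.simulation       # must be the PathSampling that produced the step
sampler.restart_at_step(step, storage=storage)
sampler.run(1000)               # run(n_steps), as used by run_until_decorrelated below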
def run_until_decorrelated(self, time_reversal=True):
'Run until all trajectories are decorrelated.\n\n This runs until all the replicas in ``self.sample_set`` have\n decorrelated from their initial conditions. "Decorrelated" here is\n meant in the sense commonly used in one-way shooting: this runs\n until no configurations from the original trajectories remain.\n '
originals = {s.replica: s.trajectory for s in self.sample_set}
current = self.sample_set
original_output_stream = self.output_stream
self.output_stream = open(os.devnull, 'w')
def n_correlated(sample_set, originals):
return sum([originals[r].is_correlated(sample_set[r], time_reversal) for r in originals])
original_output_stream.write('Decorrelating trajectories....\n')
to_decorrelate = n_correlated(self.sample_set, originals)
while to_decorrelate:
out_str = 'Step {}: {} of {} trajectories still correlated\n'
paths.tools.refresh_output(out_str.format((self.step + 1), to_decorrelate, len(originals)), refresh=False, output_stream=original_output_stream)
self.run(1)
to_decorrelate = n_correlated(self.sample_set, originals)
paths.tools.refresh_output('Step {}: All trajectories decorrelated!\n'.format((self.step + 1)), refresh=False, output_stream=original_output_stream)
self.output_stream = original_output_stream
| 5,267,402,370,953,656,000
|
Run until all trajectories are decorrelated.
This runs until all the replicas in ``self.sample_set`` have
decorrelated from their initial conditions. "Decorrelated" here is
meant in the sense commonly used in one-way shooting: this runs
until no configurations from the original trajectories remain.
|
openpathsampling/pathsimulators/path_sampling.py
|
run_until_decorrelated
|
bolhuis/openpathsampling
|
python
|
def run_until_decorrelated(self, time_reversal=True):
'Run until all trajectories are decorrelated.\n\n This runs until all the replicas in ``self.sample_set`` have\n decorrelated from their initial conditions. "Decorrelated" here is\n meant in the sense commonly used in one-way shooting: this runs\n until no configurations from the original trajectories remain.\n '
originals = {s.replica: s.trajectory for s in self.sample_set}
current = self.sample_set
original_output_stream = self.output_stream
self.output_stream = open(os.devnull, 'w')
def n_correlated(sample_set, originals):
return sum([originals[r].is_correlated(sample_set[r], time_reversal) for r in originals])
original_output_stream.write('Decorrelating trajectories....\n')
to_decorrelate = n_correlated(self.sample_set, originals)
while to_decorrelate:
out_str = 'Step {}: {} of {} trajectories still correlated\n'
paths.tools.refresh_output(out_str.format((self.step + 1), to_decorrelate, len(originals)), refresh=False, output_stream=original_output_stream)
self.run(1)
to_decorrelate = n_correlated(self.sample_set, originals)
paths.tools.refresh_output('Step {}: All trajectories decorrelated!\n'.format((self.step + 1)), refresh=False, output_stream=original_output_stream)
self.output_stream = original_output_stream
|
def create_user(self, email, password=None, **extra_fields):
'\n Creates and saves a User with the given email and\n password.\n '
now = timezone.now()
if (not email):
raise ValueError('The given email must be set')
email = UserManager.normalize_email(email)
user = self.model(email=email, is_staff=False, is_active=True, is_superuser=False, last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
| -6,193,041,823,426,439,000
|
Creates and saves a User with the given email and
password.
|
src/oscar/apps/customer/abstract_models.py
|
create_user
|
Abirami15/django-oscar
|
python
|
def create_user(self, email, password=None, **extra_fields):
'\n Creates and saves a User with the given email and\n password.\n '
now = timezone.now()
if (not email):
raise ValueError('The given email must be set')
email = UserManager.normalize_email(email)
user = self.model(email=email, is_staff=False, is_active=True, is_superuser=False, last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
|
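A hedged usage sketch; User stands for whatever concrete model this manager is attached to, and the field values are illustrative:

# Hypothetical: create a regular (non-staff) account via the manager above.
user = User.objects.create_user('alice@example.com', password='s3cret', first_name='Alice')
assert user.is_active and not user.is_staff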
def get_full_name(self):
'\n Return the first_name plus the last_name, with a space in between.\n '
full_name = ('%s %s' % (self.first_name, self.last_name))
return full_name.strip()
| 102,124,964,758,521,170
|
Return the first_name plus the last_name, with a space in between.
|
src/oscar/apps/customer/abstract_models.py
|
get_full_name
|
Abirami15/django-oscar
|
python
|
def get_full_name(self):
'\n \n '
full_name = ('%s %s' % (self.first_name, self.last_name))
return full_name.strip()
|
def get_short_name(self):
'\n Return the short name for the user.\n '
return self.first_name
| -62,519,838,540,969,440
|
Return the short name for the user.
|
src/oscar/apps/customer/abstract_models.py
|
get_short_name
|
Abirami15/django-oscar
|
python
|
def get_short_name(self):
'\n \n '
return self.first_name
|
def email_user(self, subject, message, from_email=None, **kwargs):
'\n Send an email to this user.\n '
send_mail(subject, message, from_email, [self.email], **kwargs)
| -3,977,850,786,468,333,600
|
Send an email to this user.
|
src/oscar/apps/customer/abstract_models.py
|
email_user
|
Abirami15/django-oscar
|
python
|
def email_user(self, subject, message, from_email=None, **kwargs):
'\n \n '
send_mail(subject, message, from_email, [self.email], **kwargs)
|