code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def content_loss(content_layer, generated_layer):
return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))
<|reserved_special_token_0|>
def get_gram_matrix(matrix, num_filters):
matrix_vectorized = tf.reshape(matrix, [-1, num_filters])
return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def content_loss(content_layer, generated_layer):
return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))
def style_loss(style_layers, generated_layers, weights):
layer_losses = []
for index in [0, 1, 2, 3]:
reference_layer = style_layers[index]
generated_image_layer = generated_layers[index]
N = reference_layer.shape[3]
M = reference_layer.shape[1] * reference_layer.shape[2]
layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M ** 2 * N **
2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) -
get_gram_matrix(generated_image_layer, N))))
return sum(layer_losses)
def get_gram_matrix(matrix, num_filters):
matrix_vectorized = tf.reshape(matrix, [-1, num_filters])
return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)
<|reserved_special_token_1|>
import tensorflow as tf
from vgg16 import vgg16
def content_loss(content_layer, generated_layer):
return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))
def style_loss(style_layers, generated_layers, weights):
layer_losses = []
for index in [0, 1, 2, 3]:
reference_layer = style_layers[index]
generated_image_layer = generated_layers[index]
N = reference_layer.shape[3]
M = reference_layer.shape[1] * reference_layer.shape[2]
layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M ** 2 * N **
2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) -
get_gram_matrix(generated_image_layer, N))))
return sum(layer_losses)
def get_gram_matrix(matrix, num_filters):
matrix_vectorized = tf.reshape(matrix, [-1, num_filters])
return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)
<|reserved_special_token_1|>
import tensorflow as tf
from vgg16 import vgg16
def content_loss(content_layer, generated_layer):
# sess.run(vgg_net.image.assign(generated_image))
# now we define the loss as the difference between the reference activations and
# the generated image activations in the specified layer
# return 1/2 * tf.nn.l2_loss(content_layer - generated_layer)
return tf.scalar_mul(.5, tf.nn.l2_loss(content_layer - generated_layer))
def style_loss(style_layers, generated_layers, weights):
layer_losses = []
for index in [0, 1, 2, 3]:
reference_layer = style_layers[index]
generated_image_layer = generated_layers[index]
N = reference_layer.shape[3]
M = reference_layer.shape[1] * reference_layer.shape[2]
# layer_losses.append(weights[index] * (4 / (M**2 * N**2)) * tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N)))
layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M**2 * N**2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N))))
return sum(layer_losses)
def get_gram_matrix(matrix, num_filters):
# first vectorize the matrix
matrix_vectorized = tf.reshape(matrix, [-1, num_filters])
# then calculate the gram by multiplying the vector by its transpose
return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)
# def run_vgg(sess, image):
# print "making the template", image.shape
# imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
# net = vgg16(imgs, 'vgg16_weights.npz', sess)
# print "model loaded"
# # net = VGG16({'data': image})
# # net.load(model_data_path, session)
# # session.run(net.get_output(), feed_dict={input_node: image})
# sess.run(net.probs, feed_dict={net.imgs: image})
# return net
|
flexible
|
{
"blob_id": "f92b939bf9813e5c78bc450ff270d5fb6171792a",
"index": 4810,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef content_loss(content_layer, generated_layer):\n return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))\n\n\n<mask token>\n\n\ndef get_gram_matrix(matrix, num_filters):\n matrix_vectorized = tf.reshape(matrix, [-1, num_filters])\n return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)\n",
"step-3": "<mask token>\n\n\ndef content_loss(content_layer, generated_layer):\n return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))\n\n\ndef style_loss(style_layers, generated_layers, weights):\n layer_losses = []\n for index in [0, 1, 2, 3]:\n reference_layer = style_layers[index]\n generated_image_layer = generated_layers[index]\n N = reference_layer.shape[3]\n M = reference_layer.shape[1] * reference_layer.shape[2]\n layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M ** 2 * N **\n 2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) -\n get_gram_matrix(generated_image_layer, N))))\n return sum(layer_losses)\n\n\ndef get_gram_matrix(matrix, num_filters):\n matrix_vectorized = tf.reshape(matrix, [-1, num_filters])\n return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)\n",
"step-4": "import tensorflow as tf\nfrom vgg16 import vgg16\n\n\ndef content_loss(content_layer, generated_layer):\n return tf.scalar_mul(0.5, tf.nn.l2_loss(content_layer - generated_layer))\n\n\ndef style_loss(style_layers, generated_layers, weights):\n layer_losses = []\n for index in [0, 1, 2, 3]:\n reference_layer = style_layers[index]\n generated_image_layer = generated_layers[index]\n N = reference_layer.shape[3]\n M = reference_layer.shape[1] * reference_layer.shape[2]\n layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M ** 2 * N **\n 2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) -\n get_gram_matrix(generated_image_layer, N))))\n return sum(layer_losses)\n\n\ndef get_gram_matrix(matrix, num_filters):\n matrix_vectorized = tf.reshape(matrix, [-1, num_filters])\n return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)\n",
"step-5": "import tensorflow as tf\nfrom vgg16 import vgg16\n\ndef content_loss(content_layer, generated_layer):\n # sess.run(vgg_net.image.assign(generated_image))\n\n # now we define the loss as the difference between the reference activations and \n # the generated image activations in the specified layer\n # return 1/2 * tf.nn.l2_loss(content_layer - generated_layer)\n return tf.scalar_mul(.5, tf.nn.l2_loss(content_layer - generated_layer))\n\ndef style_loss(style_layers, generated_layers, weights):\n \n layer_losses = []\n for index in [0, 1, 2, 3]:\n reference_layer = style_layers[index]\n generated_image_layer = generated_layers[index]\n\n N = reference_layer.shape[3]\n M = reference_layer.shape[1] * reference_layer.shape[2]\n # layer_losses.append(weights[index] * (4 / (M**2 * N**2)) * tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N)))\n layer_losses.append(tf.scalar_mul(weights[index] * 4 / (M**2 * N**2), tf.nn.l2_loss(get_gram_matrix(reference_layer, N) - get_gram_matrix(generated_image_layer, N))))\n\n return sum(layer_losses)\n\n\ndef get_gram_matrix(matrix, num_filters):\n # first vectorize the matrix\n matrix_vectorized = tf.reshape(matrix, [-1, num_filters])\n # then calculate the gram by multiplying the vector by its transpose\n return tf.matmul(tf.transpose(matrix_vectorized), matrix_vectorized)\n\n\n# def run_vgg(sess, image):\n# print \"making the template\", image.shape\n# imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])\n# net = vgg16(imgs, 'vgg16_weights.npz', sess)\n# print \"model loaded\"\n# # net = VGG16({'data': image})\n# # net.load(model_data_path, session)\n# # session.run(net.get_output(), feed_dict={input_node: image})\n# sess.run(net.probs, feed_dict={net.imgs: image})\n# return net\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from rest_framework import serializers
from .models import Twit, Comment, Message
from django.contrib.auth.models import User
class TwitSerializer(serializers.ModelSerializer):
class Meta:
model = Twit
fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = '__all__'
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = ('sender', 'receiver', 'content', 'creation_date')
|
normal
|
{
"blob_id": "536a67935527eb99bc0424613c9b931401db0b06",
"index": 6461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass MessageSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Message\n fields = 'sender', 'receiver', 'content', 'creation_date'\n",
"step-3": "<mask token>\n\n\nclass TwitSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Twit\n fields = '__all__'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass MessageSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Message\n fields = 'sender', 'receiver', 'content', 'creation_date'\n",
"step-4": "from rest_framework import serializers\nfrom .models import Twit, Comment, Message\nfrom django.contrib.auth.models import User\n\n\nclass TwitSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Twit\n fields = '__all__'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass MessageSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Message\n fields = 'sender', 'receiver', 'content', 'creation_date'\n",
"step-5": "from rest_framework import serializers\nfrom .models import Twit, Comment, Message\nfrom django.contrib.auth.models import User\n\nclass TwitSerializer(serializers.ModelSerializer):\n class Meta:\n model = Twit\n fields = '__all__'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass MessageSerializer(serializers.ModelSerializer):\n class Meta:\n model = Message\n fields = ('sender', 'receiver', 'content', 'creation_date')\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class BasicMC(commands.Cog):
<|reserved_special_token_0|>
@commands.command(name='stealskin', aliases=['skinsteal', 'skin'])
@commands.cooldown(1, 4, commands.BucketType.user)
async def skinner(self, ctx, gamertag: str):
response = await self.session.get(
f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')
if response.status == 204:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
uuid = json.loads(await response.text()).get('id')
if uuid is None:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
response = await self.session.get(
f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'
)
content = json.loads(await response.text())
if 'error' in content:
if content['error'] == 'TooManyRequestsException':
await ctx.send(embed=discord.Embed(color=await self.bot.cc(
ctx.author.id), description=
"Oops, we're being ratelimited by the Mojang API, try again later!"
))
return
if len(content['properties']) == 0:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=
"We can't get this person's skin for some reason..."))
return
undec = base64.b64decode(content['properties'][0]['value'])
try:
url = json.loads(undec)['textures']['SKIN']['url']
except Exception:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=
'An error occurred while fetching that skin!'))
return
skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=f"""{gamertag}'s skin
[**[Download]**]({url})""")
skin_embed.set_thumbnail(url=url)
skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')
await ctx.send(embed=skin_embed)
@commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_uuid(self, ctx, gamertag: str):
r = await self.session.post('https://api.mojang.com/profiles/minecraft'
, json=[gamertag])
j = json.loads(await r.text())
if not j:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description='That user could not be found.'))
return
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=f"{gamertag}: ``{j[0]['id']}``"))
@commands.command(name='uuidtoname', aliases=['getgamertag'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_gamertag(self, ctx, uuid: str):
response = await self.session.get(
f'https://api.mojang.com/user/profiles/{uuid}/names')
if response.status == 204:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
j = json.loads(await response.text())
name = j[len(j) - 1]['name']
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=f'{uuid}: ``{name}``'))
@commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',
'cc'])
async def mc_color_codes(self, ctx):
embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=
"""Text in Minecraft can be formatted using different codes and
the section (``§``) sign."""
)
embed.set_author(name='Minecraft Formatting Codes')
embed.add_field(name='Color Codes', value=
"""<:red:697541699706028083> **Red** ``§c``
<:yellow:697541699743776808> **Yellow** ``§e``
<:green:697541699316219967> **Green** ``§a``
<:aqua:697541699173613750> **Aqua** ``§b``
<:blue:697541699655696787> **Blue** ``§9``
<:light_purple:697541699546775612> **Light Purple** ``§d``
<:white:697541699785719838> **White** ``§f``
<:gray:697541699534061630> **Gray** ``§7``
"""
)
embed.add_field(name='Color Codes', value=
"""<:dark_red:697541699488055426> **Dark Red** ``§4``
<:gold:697541699639050382> **Gold** ``§6``
<:dark_green:697541699500769420> **Dark Green** ``§2``
<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``
<:dark_blue:697541699488055437> **Dark Blue** ``§1``
<:dark_purple:697541699437592666> **Dark Purple** ``§5``
<:dark_gray:697541699471278120> **Dark Gray** ``§8``
<:black:697541699496444025> **Black** ``§0``
"""
)
embed.add_field(name='Formatting Codes', value=
"""<:bold:697541699488186419> **Bold** ``§l``
<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``
<:underline:697541699806953583> __Underline__ ``§n``
<:italic:697541699152379995> *Italic* ``§o``
<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``
<:reset:697541699697639446> Reset ``§r``
"""
)
await ctx.send(embed=embed)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicMC(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command(name='stealskin', aliases=['skinsteal', 'skin'])
@commands.cooldown(1, 4, commands.BucketType.user)
async def skinner(self, ctx, gamertag: str):
response = await self.session.get(
f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')
if response.status == 204:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
uuid = json.loads(await response.text()).get('id')
if uuid is None:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
response = await self.session.get(
f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'
)
content = json.loads(await response.text())
if 'error' in content:
if content['error'] == 'TooManyRequestsException':
await ctx.send(embed=discord.Embed(color=await self.bot.cc(
ctx.author.id), description=
"Oops, we're being ratelimited by the Mojang API, try again later!"
))
return
if len(content['properties']) == 0:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=
"We can't get this person's skin for some reason..."))
return
undec = base64.b64decode(content['properties'][0]['value'])
try:
url = json.loads(undec)['textures']['SKIN']['url']
except Exception:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=
'An error occurred while fetching that skin!'))
return
skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=f"""{gamertag}'s skin
[**[Download]**]({url})""")
skin_embed.set_thumbnail(url=url)
skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')
await ctx.send(embed=skin_embed)
@commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_uuid(self, ctx, gamertag: str):
r = await self.session.post('https://api.mojang.com/profiles/minecraft'
, json=[gamertag])
j = json.loads(await r.text())
if not j:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description='That user could not be found.'))
return
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=f"{gamertag}: ``{j[0]['id']}``"))
@commands.command(name='uuidtoname', aliases=['getgamertag'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_gamertag(self, ctx, uuid: str):
response = await self.session.get(
f'https://api.mojang.com/user/profiles/{uuid}/names')
if response.status == 204:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
j = json.loads(await response.text())
name = j[len(j) - 1]['name']
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=f'{uuid}: ``{name}``'))
@commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',
'cc'])
async def mc_color_codes(self, ctx):
embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=
"""Text in Minecraft can be formatted using different codes and
the section (``§``) sign."""
)
embed.set_author(name='Minecraft Formatting Codes')
embed.add_field(name='Color Codes', value=
"""<:red:697541699706028083> **Red** ``§c``
<:yellow:697541699743776808> **Yellow** ``§e``
<:green:697541699316219967> **Green** ``§a``
<:aqua:697541699173613750> **Aqua** ``§b``
<:blue:697541699655696787> **Blue** ``§9``
<:light_purple:697541699546775612> **Light Purple** ``§d``
<:white:697541699785719838> **White** ``§f``
<:gray:697541699534061630> **Gray** ``§7``
"""
)
embed.add_field(name='Color Codes', value=
"""<:dark_red:697541699488055426> **Dark Red** ``§4``
<:gold:697541699639050382> **Gold** ``§6``
<:dark_green:697541699500769420> **Dark Green** ``§2``
<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``
<:dark_blue:697541699488055437> **Dark Blue** ``§1``
<:dark_purple:697541699437592666> **Dark Purple** ``§5``
<:dark_gray:697541699471278120> **Dark Gray** ``§8``
<:black:697541699496444025> **Black** ``§0``
"""
)
embed.add_field(name='Formatting Codes', value=
"""<:bold:697541699488186419> **Bold** ``§l``
<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``
<:underline:697541699806953583> __Underline__ ``§n``
<:italic:697541699152379995> *Italic* ``§o``
<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``
<:reset:697541699697639446> Reset ``§r``
"""
)
await ctx.send(embed=embed)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicMC(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command(name='stealskin', aliases=['skinsteal', 'skin'])
@commands.cooldown(1, 4, commands.BucketType.user)
async def skinner(self, ctx, gamertag: str):
response = await self.session.get(
f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')
if response.status == 204:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
uuid = json.loads(await response.text()).get('id')
if uuid is None:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
response = await self.session.get(
f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'
)
content = json.loads(await response.text())
if 'error' in content:
if content['error'] == 'TooManyRequestsException':
await ctx.send(embed=discord.Embed(color=await self.bot.cc(
ctx.author.id), description=
"Oops, we're being ratelimited by the Mojang API, try again later!"
))
return
if len(content['properties']) == 0:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=
"We can't get this person's skin for some reason..."))
return
undec = base64.b64decode(content['properties'][0]['value'])
try:
url = json.loads(undec)['textures']['SKIN']['url']
except Exception:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=
'An error occurred while fetching that skin!'))
return
skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=f"""{gamertag}'s skin
[**[Download]**]({url})""")
skin_embed.set_thumbnail(url=url)
skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')
await ctx.send(embed=skin_embed)
@commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_uuid(self, ctx, gamertag: str):
r = await self.session.post('https://api.mojang.com/profiles/minecraft'
, json=[gamertag])
j = json.loads(await r.text())
if not j:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description='That user could not be found.'))
return
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=f"{gamertag}: ``{j[0]['id']}``"))
@commands.command(name='uuidtoname', aliases=['getgamertag'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_gamertag(self, ctx, uuid: str):
response = await self.session.get(
f'https://api.mojang.com/user/profiles/{uuid}/names')
if response.status == 204:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
j = json.loads(await response.text())
name = j[len(j) - 1]['name']
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=f'{uuid}: ``{name}``'))
@commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',
'cc'])
async def mc_color_codes(self, ctx):
embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=
"""Text in Minecraft can be formatted using different codes and
the section (``§``) sign."""
)
embed.set_author(name='Minecraft Formatting Codes')
embed.add_field(name='Color Codes', value=
"""<:red:697541699706028083> **Red** ``§c``
<:yellow:697541699743776808> **Yellow** ``§e``
<:green:697541699316219967> **Green** ``§a``
<:aqua:697541699173613750> **Aqua** ``§b``
<:blue:697541699655696787> **Blue** ``§9``
<:light_purple:697541699546775612> **Light Purple** ``§d``
<:white:697541699785719838> **White** ``§f``
<:gray:697541699534061630> **Gray** ``§7``
"""
)
embed.add_field(name='Color Codes', value=
"""<:dark_red:697541699488055426> **Dark Red** ``§4``
<:gold:697541699639050382> **Gold** ``§6``
<:dark_green:697541699500769420> **Dark Green** ``§2``
<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``
<:dark_blue:697541699488055437> **Dark Blue** ``§1``
<:dark_purple:697541699437592666> **Dark Purple** ``§5``
<:dark_gray:697541699471278120> **Dark Gray** ``§8``
<:black:697541699496444025> **Black** ``§0``
"""
)
embed.add_field(name='Formatting Codes', value=
"""<:bold:697541699488186419> **Bold** ``§l``
<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``
<:underline:697541699806953583> __Underline__ ``§n``
<:italic:697541699152379995> *Italic* ``§o``
<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``
<:reset:697541699697639446> Reset ``§r``
"""
)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(BasicMC(bot))
<|reserved_special_token_1|>
import aiohttp
import asyncio
import base64
import discord
import json
from discord.ext import commands
class BasicMC(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command(name='stealskin', aliases=['skinsteal', 'skin'])
@commands.cooldown(1, 4, commands.BucketType.user)
async def skinner(self, ctx, gamertag: str):
response = await self.session.get(
f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')
if response.status == 204:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
uuid = json.loads(await response.text()).get('id')
if uuid is None:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
response = await self.session.get(
f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'
)
content = json.loads(await response.text())
if 'error' in content:
if content['error'] == 'TooManyRequestsException':
await ctx.send(embed=discord.Embed(color=await self.bot.cc(
ctx.author.id), description=
"Oops, we're being ratelimited by the Mojang API, try again later!"
))
return
if len(content['properties']) == 0:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=
"We can't get this person's skin for some reason..."))
return
undec = base64.b64decode(content['properties'][0]['value'])
try:
url = json.loads(undec)['textures']['SKIN']['url']
except Exception:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=
'An error occurred while fetching that skin!'))
return
skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=f"""{gamertag}'s skin
[**[Download]**]({url})""")
skin_embed.set_thumbnail(url=url)
skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')
await ctx.send(embed=skin_embed)
@commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_uuid(self, ctx, gamertag: str):
r = await self.session.post('https://api.mojang.com/profiles/minecraft'
, json=[gamertag])
j = json.loads(await r.text())
if not j:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description='That user could not be found.'))
return
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=f"{gamertag}: ``{j[0]['id']}``"))
@commands.command(name='uuidtoname', aliases=['getgamertag'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_gamertag(self, ctx, uuid: str):
response = await self.session.get(
f'https://api.mojang.com/user/profiles/{uuid}/names')
if response.status == 204:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description="That player doesn't exist!"))
return
j = json.loads(await response.text())
name = j[len(j) - 1]['name']
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.
author.id), description=f'{uuid}: ``{name}``'))
@commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',
'cc'])
async def mc_color_codes(self, ctx):
embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=
"""Text in Minecraft can be formatted using different codes and
the section (``§``) sign."""
)
embed.set_author(name='Minecraft Formatting Codes')
embed.add_field(name='Color Codes', value=
"""<:red:697541699706028083> **Red** ``§c``
<:yellow:697541699743776808> **Yellow** ``§e``
<:green:697541699316219967> **Green** ``§a``
<:aqua:697541699173613750> **Aqua** ``§b``
<:blue:697541699655696787> **Blue** ``§9``
<:light_purple:697541699546775612> **Light Purple** ``§d``
<:white:697541699785719838> **White** ``§f``
<:gray:697541699534061630> **Gray** ``§7``
"""
)
embed.add_field(name='Color Codes', value=
"""<:dark_red:697541699488055426> **Dark Red** ``§4``
<:gold:697541699639050382> **Gold** ``§6``
<:dark_green:697541699500769420> **Dark Green** ``§2``
<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``
<:dark_blue:697541699488055437> **Dark Blue** ``§1``
<:dark_purple:697541699437592666> **Dark Purple** ``§5``
<:dark_gray:697541699471278120> **Dark Gray** ``§8``
<:black:697541699496444025> **Black** ``§0``
"""
)
embed.add_field(name='Formatting Codes', value=
"""<:bold:697541699488186419> **Bold** ``§l``
<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``
<:underline:697541699806953583> __Underline__ ``§n``
<:italic:697541699152379995> *Italic* ``§o``
<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``
<:reset:697541699697639446> Reset ``§r``
"""
)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(BasicMC(bot))
<|reserved_special_token_1|>
import aiohttp
import asyncio
import base64
import discord
import json
from discord.ext import commands
class BasicMC(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command(name="stealskin", aliases=["skinsteal", "skin"])
@commands.cooldown(1, 4, commands.BucketType.user)
async def skinner(self, ctx, gamertag: str):
response = await self.session.get(f"https://api.mojang.com/users/profiles/minecraft/{gamertag}")
if response.status == 204:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
uuid = json.loads(await response.text()).get("id")
if uuid is None:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
response = await self.session.get(
f"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false")
content = json.loads(await response.text())
if "error" in content:
if content["error"] == "TooManyRequestsException":
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="Oops, we're being ratelimited by the Mojang API, try again later!"))
return
if len(content["properties"]) == 0:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="We can't get this person's skin for some reason..."))
return
undec = base64.b64decode(content["properties"][0]["value"])
try:
url = json.loads(undec)["textures"]["SKIN"]["url"]
except Exception:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="An error occurred while fetching that skin!"))
return
skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=f"{gamertag}'s skin\n[**[Download]**]({url})")
skin_embed.set_thumbnail(url=url)
skin_embed.set_image(url=f"https://mc-heads.net/body/{gamertag}")
await ctx.send(embed=skin_embed)
@commands.command(name="nametouuid", aliases=["uuid", "getuuid"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_uuid(self, ctx, gamertag: str):
r = await self.session.post("https://api.mojang.com/profiles/minecraft", json=[gamertag])
j = json.loads(await r.text()) # [0]['id']
if not j:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="That user could not be found."))
return
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f"{gamertag}: ``{j[0]['id']}``"))
@commands.command(name="uuidtoname", aliases=["getgamertag"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_gamertag(self, ctx, uuid: str):
response = await self.session.get(f"https://api.mojang.com/user/profiles/{uuid}/names")
if response.status == 204:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
j = json.loads(await response.text())
name = j[len(j) - 1]["name"]
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f"{uuid}: ``{name}``"))
@commands.command(name="colorcodes", aliases=["mccolorcodes", "colors", "cc"])
async def mc_color_codes(self, ctx):
embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description="Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.")
embed.set_author(name="Minecraft Formatting Codes")
embed.add_field(name="Color Codes", value="<:red:697541699706028083> **Red** ``§c``\n"
"<:yellow:697541699743776808> **Yellow** ``§e``\n"
"<:green:697541699316219967> **Green** ``§a``\n"
"<:aqua:697541699173613750> **Aqua** ``§b``\n"
"<:blue:697541699655696787> **Blue** ``§9``\n"
"<:light_purple:697541699546775612> **Light Purple** ``§d``\n"
"<:white:697541699785719838> **White** ``§f``\n"
"<:gray:697541699534061630> **Gray** ``§7``\n")
embed.add_field(name="Color Codes", value="<:dark_red:697541699488055426> **Dark Red** ``§4``\n"
"<:gold:697541699639050382> **Gold** ``§6``\n"
"<:dark_green:697541699500769420> **Dark Green** ``§2``\n"
"<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n"
"<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n"
"<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n"
"<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n"
"<:black:697541699496444025> **Black** ``§0``\n")
embed.add_field(name="Formatting Codes", value="<:bold:697541699488186419> **Bold** ``§l``\n"
"<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n"
"<:underline:697541699806953583> __Underline__ ``§n``\n"
"<:italic:697541699152379995> *Italic* ``§o``\n"
"<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n"
"<:reset:697541699697639446> Reset ``§r``\n")
await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: attach the BasicMC cog to the bot."""
    cog = BasicMC(bot)
    bot.add_cog(cog)
|
flexible
|
{
"blob_id": "a6f242a0443ffbad835f86098b70ede41c03515b",
"index": 7652,
"step-1": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n <mask token>\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n 
@commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> 
**Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n 
@commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n 
embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n 
@commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n 
embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-4": "import aiohttp\nimport asyncio\nimport base64\nimport discord\nimport json\nfrom discord.ext import commands\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n 
skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** 
``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-5": "import aiohttp\nimport asyncio\nimport base64\nimport discord\nimport json\nfrom discord.ext import commands\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n self.session = aiohttp.ClientSession()\n\n @commands.command(name=\"stealskin\", aliases=[\"skinsteal\", \"skin\"])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(f\"https://api.mojang.com/users/profiles/minecraft/{gamertag}\")\n if response.status == 204:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get(\"id\")\n if uuid is None:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f\"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false\")\n content = json.loads(await response.text())\n if \"error\" in content:\n if content[\"error\"] == \"TooManyRequestsException\":\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"Oops, we're being ratelimited by the Mojang API, try again later!\"))\n return\n if len(content[\"properties\"]) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content[\"properties\"][0][\"value\"])\n try:\n url = json.loads(undec)[\"textures\"][\"SKIN\"][\"url\"]\n except Exception:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"An error occurred while fetching that skin!\"))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"{gamertag}'s skin\\n[**[Download]**]({url})\")\n skin_embed.set_thumbnail(url=url)\n 
skin_embed.set_image(url=f\"https://mc-heads.net/body/{gamertag}\")\n await ctx.send(embed=skin_embed)\n\n @commands.command(name=\"nametouuid\", aliases=[\"uuid\", \"getuuid\"])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post(\"https://api.mojang.com/profiles/minecraft\", json=[gamertag])\n j = json.loads(await r.text()) # [0]['id']\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"That user could not be found.\"))\n return\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name=\"uuidtoname\", aliases=[\"getgamertag\"])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(f\"https://api.mojang.com/user/profiles/{uuid}/names\")\n if response.status == 204:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1][\"name\"]\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f\"{uuid}: ``{name}``\"))\n\n @commands.command(name=\"colorcodes\", aliases=[\"mccolorcodes\", \"colors\", \"cc\"])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"Text in Minecraft can be formatted using different codes and\\nthe section (``§``) sign.\")\n embed.set_author(name=\"Minecraft Formatting Codes\")\n embed.add_field(name=\"Color Codes\", value=\"<:red:697541699706028083> **Red** ``§c``\\n\"\n \"<:yellow:697541699743776808> **Yellow** ``§e``\\n\"\n \"<:green:697541699316219967> **Green** ``§a``\\n\"\n \"<:aqua:697541699173613750> **Aqua** ``§b``\\n\"\n \"<:blue:697541699655696787> **Blue** ``§9``\\n\"\n 
\"<:light_purple:697541699546775612> **Light Purple** ``§d``\\n\"\n \"<:white:697541699785719838> **White** ``§f``\\n\"\n \"<:gray:697541699534061630> **Gray** ``§7``\\n\")\n embed.add_field(name=\"Color Codes\", value=\"<:dark_red:697541699488055426> **Dark Red** ``§4``\\n\"\n \"<:gold:697541699639050382> **Gold** ``§6``\\n\"\n \"<:dark_green:697541699500769420> **Dark Green** ``§2``\\n\"\n \"<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\\n\"\n \"<:dark_blue:697541699488055437> **Dark Blue** ``§1``\\n\"\n \"<:dark_purple:697541699437592666> **Dark Purple** ``§5``\\n\"\n \"<:dark_gray:697541699471278120> **Dark Gray** ``§8``\\n\"\n \"<:black:697541699496444025> **Black** ``§0``\\n\")\n embed.add_field(name=\"Formatting Codes\", value=\"<:bold:697541699488186419> **Bold** ``§l``\\n\"\n \"<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\\n\"\n \"<:underline:697541699806953583> __Underline__ ``§n``\\n\"\n \"<:italic:697541699152379995> *Italic* ``§o``\\n\"\n \"<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\\n\"\n \"<:reset:697541699697639446> Reset ``§r``\\n\")\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def process_the_source(fname, dest=None, host_ip=None, verbose=False):
    """Copy *fname* to *dest*, substituting the module-level placeholder with *host_ip*.

    Each line is right-stripped before the substitution. *verbose* is accepted
    for interface compatibility but not used inside this function.
    """
    assert os.path.exists(fname) and os.path.isfile(fname), 'Cannot proceed without the fname in process_the_source().'
    with open(fname, 'r') as source:
        rewritten = [raw.rstrip().replace(__target__, host_ip) for raw in source]
    with open(dest, 'w') as sink:
        sink.writelines(text + '\n' for text in rewritten)
    assert os.path.exists(dest) and os.path.isfile(dest), 'Cannot proceed without the dest file in process_the_source().'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def process_the_source(fname, dest=None, host_ip=None, verbose=False):
    """Copy *fname* to *dest*, substituting the module-level placeholder with *host_ip*.

    Each line is right-stripped before the substitution. *verbose* is accepted
    for interface compatibility but not used inside this function.
    """
    assert os.path.exists(fname) and os.path.isfile(fname), 'Cannot proceed without the fname in process_the_source().'
    with open(fname, 'r') as source:
        rewritten = [raw.rstrip().replace(__target__, host_ip) for raw in source]
    with open(dest, 'w') as sink:
        sink.writelines(text + '\n' for text in rewritten)
    assert os.path.exists(dest) and os.path.isfile(dest), 'Cannot proceed without the dest file in process_the_source().'
if __name__ == '__main__':
    # CLI: <root-dir> <host-ip>; rewrites each registered source into its destination.
    is_verbose = True
    root = sys.argv[1]
    host_ip = sys.argv[2]
    assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'
    assert os.path.exists(root) and os.path.isdir(root), 'Cannot proceed without the root in process_the_source().'
    sources[f'{root}/.env'] = f'{root}/code/.env'
    if is_verbose:
        print('BEGIN:')
    for src, dst in sources.items():
        if is_verbose:
            print(f'{src} -> {dst}')
        assert os.path.exists(src) and os.path.isfile(src), f'Cannot find "{src}" so cannot proceed.'
        process_the_source(src, dest=dst, host_ip=host_ip, verbose=is_verbose)
    if is_verbose:
        print('END!!!')
    if is_verbose:
        print()
        print('Done.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__target__ = '${EXTERNAL_HOST}'  # placeholder token that process_the_source() replaces with the real host IP
sources = {}  # source path -> destination path; populated in the __main__ block below
def process_the_source(fname, dest=None, host_ip=None, verbose=False):
    """Copy *fname* to *dest*, substituting the module-level placeholder with *host_ip*.

    Each line is right-stripped before the substitution. *verbose* is accepted
    for interface compatibility but not used inside this function.
    """
    assert os.path.exists(fname) and os.path.isfile(fname), 'Cannot proceed without the fname in process_the_source().'
    with open(fname, 'r') as source:
        rewritten = [raw.rstrip().replace(__target__, host_ip) for raw in source]
    with open(dest, 'w') as sink:
        sink.writelines(text + '\n' for text in rewritten)
    assert os.path.exists(dest) and os.path.isfile(dest), 'Cannot proceed without the dest file in process_the_source().'
if __name__ == '__main__':
    # CLI: <root-dir> <host-ip>; rewrites each registered source into its destination.
    is_verbose = True
    root = sys.argv[1]
    host_ip = sys.argv[2]
    assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'
    assert os.path.exists(root) and os.path.isdir(root), 'Cannot proceed without the root in process_the_source().'
    sources[f'{root}/.env'] = f'{root}/code/.env'
    if is_verbose:
        print('BEGIN:')
    for src, dst in sources.items():
        if is_verbose:
            print(f'{src} -> {dst}')
        assert os.path.exists(src) and os.path.isfile(src), f'Cannot find "{src}" so cannot proceed.'
        process_the_source(src, dest=dst, host_ip=host_ip, verbose=is_verbose)
    if is_verbose:
        print('END!!!')
    if is_verbose:
        print()
        print('Done.')
<|reserved_special_token_1|>
import os
import sys
import socket
__target__ = '${EXTERNAL_HOST}'  # placeholder token that process_the_source() replaces with the real host IP
sources = {}  # source path -> destination path; populated in the __main__ block below
def process_the_source(fname, dest=None, host_ip=None, verbose=False):
    """Copy *fname* to *dest*, substituting the module-level placeholder with *host_ip*.

    Each line is right-stripped before the substitution. *verbose* is accepted
    for interface compatibility but not used inside this function.
    """
    assert os.path.exists(fname) and os.path.isfile(fname), 'Cannot proceed without the fname in process_the_source().'
    with open(fname, 'r') as source:
        rewritten = [raw.rstrip().replace(__target__, host_ip) for raw in source]
    with open(dest, 'w') as sink:
        sink.writelines(text + '\n' for text in rewritten)
    assert os.path.exists(dest) and os.path.isfile(dest), 'Cannot proceed without the dest file in process_the_source().'
if __name__ == '__main__':
    # CLI: <root-dir> <host-ip>; rewrites each registered source into its destination.
    is_verbose = True
    root = sys.argv[1]
    host_ip = sys.argv[2]
    assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'
    assert os.path.exists(root) and os.path.isdir(root), 'Cannot proceed without the root in process_the_source().'
    sources[f'{root}/.env'] = f'{root}/code/.env'
    if is_verbose:
        print('BEGIN:')
    for src, dst in sources.items():
        if is_verbose:
            print(f'{src} -> {dst}')
        assert os.path.exists(src) and os.path.isfile(src), f'Cannot find "{src}" so cannot proceed.'
        process_the_source(src, dest=dst, host_ip=host_ip, verbose=is_verbose)
    if is_verbose:
        print('END!!!')
    if is_verbose:
        print()
        print('Done.')
<|reserved_special_token_1|>
import os
import sys
import socket
__target__ = '${EXTERNAL_HOST}'  # placeholder token that process_the_source() replaces with the real host IP
sources = {}  # source path -> destination path; populated in the __main__ block below
def process_the_source(fname, dest=None, host_ip=None, verbose=False):
assert (os.path.exists(fname) and os.path.isfile(fname)), 'Cannot proceed without the fname in process_the_source().'
the_lines = []
with open(fname, 'r') as fIn:
for line in fIn:
l = line.rstrip()
l = l.replace(__target__, host_ip)
the_lines.append(l)
with open(dest, 'w') as fOut:
for l in the_lines:
print(l, file=fOut)
assert (os.path.exists(dest) and os.path.isfile(dest)), 'Cannot proceed without the dest file in process_the_source().'
if (__name__ == '__main__'):
    # Usage: script <root-directory> <host-ip>
    is_verbose = True
    root = sys.argv[1]
    host_ip = sys.argv[2]
    assert (len(host_ip) > 0), 'Cannot proceed without the host ip address.'
    assert (os.path.exists(root) and os.path.isdir(root)), 'Cannot proceed without the root in process_the_source().'
    # Register the single template -> destination pair to rewrite.
    sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)
    if (is_verbose):
        print('BEGIN:')
    for s,d in sources.items():
        if (is_verbose):
            print('{} -> {}'.format(s, d))
        # Each source template must exist before it can be rewritten.
        assert os.path.exists(s) and os.path.isfile(s), 'Cannot find "{}" so cannot proceed.'.format(s)
        process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)
    if (is_verbose):
        print('END!!!')
    if (is_verbose):
        print()
        print('Done.')
|
flexible
|
{
"blob_id": "d6af9a75fbe8bdf1a81a352cee71ac81fb373b86",
"index": 9926,
"step-1": "<mask token>\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-3": "<mask token>\n__target__ = '${EXTERNAL_HOST}'\nsources = {}\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-4": "import os\nimport sys\nimport socket\n__target__ = '${EXTERNAL_HOST}'\nsources = {}\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-5": "import os\nimport sys\nimport socket\n\n__target__ = '${EXTERNAL_HOST}'\n\nsources = {}\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert (os.path.exists(fname) and os.path.isfile(fname)), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert (os.path.exists(dest) and os.path.isfile(dest)), 'Cannot proceed without the dest file in process_the_source().'\n \n\nif (__name__ == '__main__'):\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert (len(host_ip) > 0), 'Cannot proceed without the host ip address.'\n\n assert (os.path.exists(root) and os.path.isdir(root)), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n\n if (is_verbose):\n print('BEGIN:')\n for s,d in sources.items():\n if (is_verbose):\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if (is_verbose):\n print('END!!!')\n\n if (is_verbose):\n print()\n print('Done.')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solve(dpArr, list, box, i):
    """Recursively enumerate contiguous partitions of dpArr into the global
    ``boxes`` groups, recording the smallest achievable maximum group weight
    in the global ``ans`` (-1 means no valid partition found yet)."""
    global boxes
    global ans
    if box == boxes:
        # Complete candidate: valid only when the groups cover all of dpArr.
        covered = sum(len(group) for group in list)
        if covered == len(dpArr):
            heaviest = 0
            for group in list:
                heaviest = max(heaviest, sum(group))
            if ans == -1 or heaviest < ans:
                ans = heaviest
        return
    # Extend the partial partition with every possible next group size.
    for size in range(1, len(dpArr) + 1):
        if i + size > len(dpArr):
            break
        solve(dpArr, list + [dpArr[i:i + size]], box + 1, i + size)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solve(dpArr, list, box, i):
    """Depth-first search over contiguous partitions of dpArr.

    dpArr : the full list of item weights.
    list  : groups (contiguous slices of dpArr) chosen so far; note this
            parameter shadows the builtin ``list``.
    box   : how many groups have been placed already.
    i     : index of the first weight not yet assigned to a group.

    Side effect: updates the module-global ``ans`` with the minimum, over
    all complete partitions into ``boxes`` groups, of the heaviest group.
    """
    global boxes
    global ans
    if box == boxes:
        # Sum of group lengths == len(dpArr) means the partition covers
        # every element (groups are contiguous from index 0).
        s = 0
        for j in list:
            s += len(j)
        if s == len(dpArr):
            # Score the candidate by its heaviest group.
            mx = 0
            for j in list:
                if sum(j) > mx:
                    mx = sum(j)
            # Keep the best score; ans == -1 means "nothing recorded yet".
            if mx < ans or ans == -1:
                ans = mx
        return
    # Try every feasible size for the next contiguous group.
    for j in range(1, len(dpArr) + 1):
        if i + j > len(dpArr):
            break
        solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)
<|reserved_special_token_0|>
# `inp` (the weight list) and the global `boxes` are expected to have been
# parsed from user input before this point in this variant.
solve(dpArr=inp, list=[], box=0, i=0)
# NOTE(review): 'weigth' is a typo in this user-facing message ('weight').
print('Minimum weigth for', boxes, 'box(es) =', ans)
<|reserved_special_token_1|>
boxes = 0  # number of contiguous groups to form; set by the driver below
ans = -1   # best (minimal) maximum group weight found so far; -1 = none yet


def solve(dpArr, list, box, i):
    """Enumerate contiguous partitions of dpArr into ``boxes`` groups.

    dpArr : the full list of item weights.
    list  : groups (contiguous slices of dpArr) chosen so far.
    box   : number of groups already placed.
    i     : index of the first weight not yet assigned to a group.

    Side effect: updates the global ``ans`` with the smallest maximum
    group weight over all complete partitions.
    """
    global boxes
    global ans
    if box == boxes:
        # Groups are contiguous slices starting at index 0, so the partition
        # covers dpArr exactly when the cursor has reached the end — no need
        # to re-sum the group lengths.
        if i == len(dpArr):
            mx = 0
            for j in list:
                w = sum(j)  # weigh each group once (was computed twice)
                if w > mx:
                    mx = w
            if mx < ans or ans == -1:
                ans = mx
        return
    # Bound the slice size up front instead of breaking mid-loop.
    for j in range(1, len(dpArr) - i + 1):
        solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)
# Driver: input format is "<w1> <w2> ... <wn>/<k>" — the item weights,
# a slash, then the number of boxes.
inp = input('Enter Input : ')
inp, boxes = list(map(int, inp.split('/')[0].split())), int(inp.split('/')[1])
# Search all contiguous partitions; the result accumulates in the global `ans`.
solve(dpArr=inp, list=[], box=0, i=0)
print('Minimum weight for', boxes, 'box(es) =', ans)  # fixed 'weigth' typo
<|reserved_special_token_1|>
# #1
# def bi_search(l, r, arr, x):
# # Code Here
# if(l == r):
# return arr[r] == x
# mid = (l + r)//2 + 1
# if(arr[mid] > x):
# return bi_search(l,mid-1,arr,x)
# else:
# return bi_search(mid,r,arr,x)
# inp = input('Enter Input : ').split('/')
# arr, k = list(map(int, inp[0].split())), int(inp[1])
# print(bi_search(0, len(arr) - 1, sorted(arr), k))
# #2
# def bi_search(l, r, arr, x):
# if(l == r):
# if arr[l] > x :
# return arr[l]
# else:
# return None
# mid = (l + r)//2 + 1
# res = None
# if(arr[mid] > x):
# res = bi_search(l,mid-1,arr,x)
# else:
# res = bi_search(mid,r,arr,x)
# return res if res else (arr[mid] if arr[mid] > x else None)
# inp = input('Enter Input : ').split('/')
# arr, arr2 = sorted(list(map(int, inp[0].split()))), list(map(int, inp[1].split()))
# for k in arr2:
# res = bi_search(0, len(arr) - 1, arr, k)
# print(res if res else "No First Greater Value")
#3
# class Data:
# def __init__(self, key, value):
# self.key = key
# self.value = value
# def __str__(self):
# return "({0}, {1})".format(self.key, self.value)
# class hash:
# def __init__(self,max,chain):
# self.data = [None for i in range(max)]
# self.limit= max
# self.chain= chain
# self.length = 0
# def code(self,a):
# return sum([ord(i) for i in a])
# def isFull(self):
# return self.length == self.limit
# def insert(self,value):
# key,val = value.split(" ")
# s = self.code(key)
# co = 0
# now = 0
# while(co <= self.chain):
# if(co != 0):
# print ("collision number",co,"at",now)
# if(co == self.chain):
# break
# now = (s + (0 if not co else co*co) ) % self.limit
# if(self.data[now] == None):
# self.data[now] = Data(key,val)
# self.length += 1
# break
# co += 1
# if(co >= self.chain):
# print("Max of collisionChain")
# def __str__(self):
# return "\n".join(list(map(str,[ "#{0} {1}".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + "\n---------------------------"
# print(" ***** Fun with hashing *****")
# val,arr = input("Enter Input : ").split("/")
# h = hash(int(val.split(" ")[0]),int(val.split(" ")[1]))
# arr = arr.split(",")
# for i in arr:
# h.insert(i)
# print(h)
# if(h.isFull()):
# print("This table is full !!!!!!")
# break
#4
# import math
# class Data:
# def __init__(self, value):
# self.value = value
# def __str__(self):
# return str(self.value)
# class hash:
# def __init__(self,max,chain,t):
# self.data = [None for i in range(max)]
# self.limit = max
# self.chain = chain
# self.length = 0
# self.threshold = t
# self.bu = list()
# def code(self,a):
# # return sum([ord(i) for i in a])
# return int(a)
# def isFull(self):
# return self.length == self.limit
# def findNearPrime(self):
# i = self.limit * 2
# while(True):
# c = True
# for j in range(2, int(math.sqrt(i)) + 1):
# if(not i % j):
# i += 1
# c = False
# break
# if c :
# break
# return i
# def handlerIllegal(self,co,value):
# if(self.length * 100 // self.limit >= self.threshold):
# print("****** Data over threshold - Rehash !!! ******")
# self.resize()
# self.Rehash()
# elif (co >= self.chain):
# print("****** Max collision - Rehash !!! ******")
# self.resize()
# self.Rehash()
# def resize(self):
# self.data += [None for i in range(self.findNearPrime() - self.limit)]
# self.limit = len(self.data)
# def Rehash(self):
# for i in range(self.limit):
# self.data[i] = None
# for i in self.bu:
# self.insert(i,False)
# def insert(self,value,Rehash = True):
# s = self.code(value)
# co = 0
# now = 0
# while(co <= self.chain):
# if(co != 0):
# print ("collision number",co,"at",now)
# if(co == self.chain):
# break
# now = (s + (0 if not co else co*co) ) % self.limit
# if(self.data[now] == None):
# self.data[now] = Data(value)
# if(Rehash):
# self.length += 1
# break
# co += 1
# if(Rehash):
# self.handlerIllegal(co,value)
# def addBuff(self,value):
# self.bu.append(value)
# def __str__(self):
# return "\n".join(list(map(str,[ "#{0} {1}".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + "\n----------------------------------------"
# print(" ***** Rehashing *****")
# val,arr = input("Enter Input : ").split("/")
# h = hash(int(val.split(" ")[0]),int(val.split(" ")[1]),int(val.split(" ")[2]))
# arr = arr.split()
# print("Initial Table :",h,sep="\n")
# for i in arr:
# print("Add :",i)
# h.addBuff(i)
# h.insert(i)
# print(h)
# if(h.isFull()):
# print("This table is full !!!!!!")
# break
# 5
boxes = 0
ans = -1


def solve(dpArr, list, box, i):
    """Depth-first search over contiguous partitions of dpArr.

    When `box` reaches the global `boxes`, the candidate partition held in
    `list` is scored by its heaviest group and the global `ans` keeps the
    best (smallest) score seen so far; -1 means no valid partition yet.
    """
    global boxes
    global ans
    if box == boxes:
        covered = sum(len(part) for part in list)
        if covered == len(dpArr):
            mx = 0
            for part in list:
                if sum(part) > mx:
                    mx = sum(part)
            if ans == -1 or mx < ans:
                ans = mx
        return
    # Grow the next contiguous group one element at a time.
    size = 1
    while i + size <= len(dpArr):
        solve(dpArr, list + [dpArr[i:i + size]], box + 1, i + size)
        size += 1
inp = input("Enter Input : ")
inp,boxes = list(map(int,inp.split("/")[0].split() )) , int( inp.split("/")[1])
# for i in range(1,len(inp)):
# inp[i] += inp[i-1]
solve(dpArr = inp,list = [],box = 0,i = 0)
print("Minimum weigth for",boxes,"box(es) =",ans)
|
flexible
|
{
"blob_id": "883b4de18dddede97f850e3a184a0e1072bda99e",
"index": 814,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\n<mask token>\nsolve(dpArr=inp, list=[], box=0, i=0)\nprint('Minimum weigth for', boxes, 'box(es) =', ans)\n",
"step-4": "boxes = 0\nans = -1\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\ninp = input('Enter Input : ')\ninp, boxes = list(map(int, inp.split('/')[0].split())), int(inp.split('/')[1])\nsolve(dpArr=inp, list=[], box=0, i=0)\nprint('Minimum weigth for', boxes, 'box(es) =', ans)\n",
"step-5": "# #1\n# def bi_search(l, r, arr, x):\n# # Code Here\n# if(l == r):\n# return arr[r] == x\n \n# mid = (l + r)//2 + 1\n# if(arr[mid] > x):\n# return bi_search(l,mid-1,arr,x)\n# else:\n# return bi_search(mid,r,arr,x)\n\n# inp = input('Enter Input : ').split('/')\n# arr, k = list(map(int, inp[0].split())), int(inp[1])\n# print(bi_search(0, len(arr) - 1, sorted(arr), k))\n\n# #2\n# def bi_search(l, r, arr, x):\n# if(l == r):\n# if arr[l] > x :\n# return arr[l]\n# else: \n# return None\n\n# mid = (l + r)//2 + 1\n# res = None\n# if(arr[mid] > x):\n# res = bi_search(l,mid-1,arr,x)\n# else:\n# res = bi_search(mid,r,arr,x)\n# return res if res else (arr[mid] if arr[mid] > x else None)\n\n\n# inp = input('Enter Input : ').split('/')\n# arr, arr2 = sorted(list(map(int, inp[0].split()))), list(map(int, inp[1].split()))\n# for k in arr2:\n# res = bi_search(0, len(arr) - 1, arr, k) \n# print(res if res else \"No First Greater Value\")\n\n#3\n# class Data:\n# def __init__(self, key, value):\n# self.key = key\n# self.value = value\n\n# def __str__(self):\n# return \"({0}, {1})\".format(self.key, self.value)\n\n# class hash:\n\n# def __init__(self,max,chain):\n# self.data = [None for i in range(max)]\n# self.limit= max\n# self.chain= chain\n# self.length = 0\n\n# def code(self,a):\n# return sum([ord(i) for i in a]) \n\n# def isFull(self):\n# return self.length == self.limit\n\n# def insert(self,value):\n# key,val = value.split(\" \")\n# s = self.code(key)\n# co = 0\n# now = 0\n# while(co <= self.chain):\n# if(co != 0):\n# print (\"collision number\",co,\"at\",now)\n# if(co == self.chain):\n# break\n# now = (s + (0 if not co else co*co) ) % self.limit \n \n\n# if(self.data[now] == None):\n# self.data[now] = Data(key,val)\n# self.length += 1\n# break\n# co += 1\n\n# if(co >= self.chain):\n# print(\"Max of collisionChain\")\n\n\n# def __str__(self):\n# return \"\\n\".join(list(map(str,[ \"#{0}\t{1}\".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + 
\"\\n---------------------------\"\n\n\n# print(\" ***** Fun with hashing *****\")\n\n# val,arr = input(\"Enter Input : \").split(\"/\")\n\n# h = hash(int(val.split(\" \")[0]),int(val.split(\" \")[1]))\n\n# arr = arr.split(\",\")\n\n# for i in arr:\n# h.insert(i)\n# print(h)\n# if(h.isFull()):\n# print(\"This table is full !!!!!!\")\n# break\n\n\n#4\n# import math\n# class Data:\n# def __init__(self, value):\n# self.value = value\n\n# def __str__(self):\n# return str(self.value)\n\n# class hash:\n\n# def __init__(self,max,chain,t):\n# self.data = [None for i in range(max)]\n# self.limit = max\n# self.chain = chain\n# self.length = 0\n# self.threshold = t\n# self.bu = list()\n\n# def code(self,a):\n# # return sum([ord(i) for i in a]) \n# return int(a)\n\n# def isFull(self):\n# return self.length == self.limit\n\n# def findNearPrime(self):\n# i = self.limit * 2\n# while(True):\n# c = True\n# for j in range(2, int(math.sqrt(i)) + 1):\n# if(not i % j):\n# i += 1\n# c = False\n# break\n# if c :\n# break\n\n# return i\n\n# def handlerIllegal(self,co,value):\n# if(self.length * 100 // self.limit >= self.threshold):\n# print(\"****** Data over threshold - Rehash !!! ******\")\n# self.resize()\n# self.Rehash()\n# elif (co >= self.chain):\n# print(\"****** Max collision - Rehash !!! 
******\")\n# self.resize()\n# self.Rehash()\n\n# def resize(self):\n# self.data += [None for i in range(self.findNearPrime() - self.limit)]\n# self.limit = len(self.data)\n\n# def Rehash(self):\n# for i in range(self.limit):\n# self.data[i] = None\n# for i in self.bu:\n# self.insert(i,False)\n\n# def insert(self,value,Rehash = True):\n# s = self.code(value)\n# co = 0\n# now = 0\n# while(co <= self.chain):\n# if(co != 0):\n# print (\"collision number\",co,\"at\",now)\n# if(co == self.chain):\n# break\n# now = (s + (0 if not co else co*co) ) % self.limit \n\n# if(self.data[now] == None):\n# self.data[now] = Data(value)\n# if(Rehash):\n# self.length += 1\n# break\n# co += 1\n\n# if(Rehash):\n# self.handlerIllegal(co,value)\n\n# def addBuff(self,value):\n# self.bu.append(value)\n\n# def __str__(self):\n# return \"\\n\".join(list(map(str,[ \"#{0}\t{1}\".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + \"\\n----------------------------------------\"\n\n\n# print(\" ***** Rehashing *****\")\n\n# val,arr = input(\"Enter Input : \").split(\"/\")\n\n# h = hash(int(val.split(\" \")[0]),int(val.split(\" \")[1]),int(val.split(\" \")[2]))\n\n# arr = arr.split()\n\n# print(\"Initial Table :\",h,sep=\"\\n\")\n\n# for i in arr:\n# print(\"Add :\",i)\n# h.addBuff(i)\n# h.insert(i)\n# print(h)\n# if(h.isFull()):\n# print(\"This table is full !!!!!!\")\n# break\n\n\n# 5\nboxes = 0\nans = -1\ndef solve(dpArr,list,box,i):\n global boxes \n global ans\n if(box == boxes):\n s = 0\n for j in list:\n s += len(j)\n \n if(s == len(dpArr)):\n mx = 0\n for j in list:\n if(sum(j) > mx):\n mx = sum(j)\n\n if(mx < ans or ans == -1):\n ans = mx \n return\n\n for j in range(1,len(dpArr) + 1):\n if ( i + j > len(dpArr) ):\n break\n solve(dpArr,list + [dpArr[i:i + j]],box + 1 ,i + j)\n\n\ninp = input(\"Enter Input : \")\n\ninp,boxes = list(map(int,inp.split(\"/\")[0].split() )) , int( inp.split(\"/\")[1])\n\n# for i in range(1,len(inp)):\n# inp[i] += inp[i-1]\n\nsolve(dpArr = 
inp,list = [],box = 0,i = 0)\nprint(\"Minimum weigth for\",boxes,\"box(es) =\",ans)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def register_cccmacms(cmap='all'):
    """Build the author's discrete colormap and register it with matplotlib.

    The default registers all of them; selecting a single one via ``cmap``
    is not implemented yet (per the original note, 2/27/14).
    """
    # RGB triples in 0-255, running from light blue through yellow to dark red.
    rgb_255 = np.array([
        [153, 255, 255],
        [204, 255, 229],
        [240, 255, 240],
        [204, 255, 153],
        [178, 255, 102],
        [216, 255, 76],
        [255, 255, 51],
        [255, 220, 51],
        [255, 187, 51],
        [255, 153, 51],
        [255, 0, 0],
        [204, 0, 0],
        [153, 0, 0],
    ], dtype=float)
    # Normalise into the [0, 1] range used by matplotlib colormaps.
    normalised = rgb_255 / 255.0
    listed = col.ListedColormap(normalised, 'acccbar')
    cm.register_cmap(cmap=listed)
    return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def register_cccmacms(cmap='all'):
    """create my personal colormaps with discrete colors and register them.
    default is to register all of them. can also specify which one.
    (@@ input arg cmap not implemented yet 2/27/14)
    """
    # 13-entry ramp of RGB triples (0-255): light blues/greens first,
    # yellows in the middle, dark reds at the end.
    cpool = np.array([[153, 255, 255], [204, 255, 229], [240, 255, 240], [
        204, 255, 153], [178, 255, 102], [216, 255, 76], [255, 255, 51], [
        255, 220, 51], [255, 187, 51], [255, 153, 51], [255, 0, 0], [204, 0,
        0], [153, 0, 0]], dtype=float)
    # Scale into [0, 1] as matplotlib colormaps require.
    acccbar = cpool / 255.0
    thecmap = col.ListedColormap(acccbar, 'acccbar')
    # NOTE(review): cm.register_cmap is deprecated in newer matplotlib
    # (matplotlib.colormaps.register is the replacement) — confirm version.
    cm.register_cmap(cmap=thecmap)
    return
# Register the colormap at import time.
register_cccmacms()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import matplotlib.colors as col
import matplotlib.cm as cm
import numpy as np
def register_cccmacms(cmap='all'):
    """create my personal colormaps with discrete colors and register them.
    default is to register all of them. can also specify which one.
    (@@ input arg cmap not implemented yet 2/27/14)
    """
    # Discrete ramp of thirteen RGB triples (0-255 each).
    cpool = np.array([[153, 255, 255], [204, 255, 229], [240, 255, 240], [
        204, 255, 153], [178, 255, 102], [216, 255, 76], [255, 255, 51], [
        255, 220, 51], [255, 187, 51], [255, 153, 51], [255, 0, 0], [204, 0,
        0], [153, 0, 0]], dtype=float)
    # Normalise to [0, 1] for ListedColormap.
    acccbar = cpool / 255.0
    thecmap = col.ListedColormap(acccbar, 'acccbar')
    # NOTE(review): register_cmap is deprecated in recent matplotlib — verify.
    cm.register_cmap(cmap=thecmap)
    return
# Side effect at import: make 'acccbar' available to plotting code.
register_cccmacms()
<|reserved_special_token_1|>
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 13:25:03 2020
@author: Dr. Michael Sigmond, Canadian Centre for Climate Modelling and Analysis
"""
import matplotlib.colors as col
import matplotlib.cm as cm
import numpy as np
def register_cccmacms(cmap='all'):
    """Create the author's discrete colormap(s) and register them.

    Default is to register all of them; selecting a single one through the
    ``cmap`` argument is not implemented yet (original note: 2/27/14).
    """
    # Color ramp (from colorwheel.m, kem_w20 / blue2red_w20 family):
    # blueish at the top, through greens and yellows, to dark red.
    cpool = np.array([
        [153, 255, 255],
        [204, 255, 229],
        [240, 255, 240],
        [204, 255, 153],
        [178, 255, 102],
        [216, 255, 76],
        [255, 255, 51],
        [255, 220, 51],
        [255, 187, 51],
        [255, 153, 51],
        [255, 0, 0],
        [204, 0, 0],
        [153, 0, 0],
    ], dtype=float)
    acccbar = cpool / 255.0  # matplotlib expects RGB channels in [0, 1]
    thecmap = col.ListedColormap(acccbar, 'acccbar')
    cm.register_cmap(cmap=thecmap)
    return


register_cccmacms()
|
flexible
|
{
"blob_id": "31a5bf0b275238e651dcb93ce80446a49a4edcf4",
"index": 6561,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef register_cccmacms(cmap='all'):\n \"\"\"create my personal colormaps with discrete colors and register them.\n \n \n default is to register all of them. can also specify which one.\n \n \n (@@ input arg cmap not implemented yet 2/27/14)\n \n \n \"\"\"\n cpool = np.array([[153, 255, 255], [204, 255, 229], [240, 255, 240], [\n 204, 255, 153], [178, 255, 102], [216, 255, 76], [255, 255, 51], [\n 255, 220, 51], [255, 187, 51], [255, 153, 51], [255, 0, 0], [204, 0,\n 0], [153, 0, 0]], dtype=float)\n acccbar = cpool / 255.0\n thecmap = col.ListedColormap(acccbar, 'acccbar')\n cm.register_cmap(cmap=thecmap)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef register_cccmacms(cmap='all'):\n \"\"\"create my personal colormaps with discrete colors and register them.\n \n \n default is to register all of them. can also specify which one.\n \n \n (@@ input arg cmap not implemented yet 2/27/14)\n \n \n \"\"\"\n cpool = np.array([[153, 255, 255], [204, 255, 229], [240, 255, 240], [\n 204, 255, 153], [178, 255, 102], [216, 255, 76], [255, 255, 51], [\n 255, 220, 51], [255, 187, 51], [255, 153, 51], [255, 0, 0], [204, 0,\n 0], [153, 0, 0]], dtype=float)\n acccbar = cpool / 255.0\n thecmap = col.ListedColormap(acccbar, 'acccbar')\n cm.register_cmap(cmap=thecmap)\n return\n\n\nregister_cccmacms()\n",
"step-4": "<mask token>\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport numpy as np\n\n\ndef register_cccmacms(cmap='all'):\n \"\"\"create my personal colormaps with discrete colors and register them.\n \n \n default is to register all of them. can also specify which one.\n \n \n (@@ input arg cmap not implemented yet 2/27/14)\n \n \n \"\"\"\n cpool = np.array([[153, 255, 255], [204, 255, 229], [240, 255, 240], [\n 204, 255, 153], [178, 255, 102], [216, 255, 76], [255, 255, 51], [\n 255, 220, 51], [255, 187, 51], [255, 153, 51], [255, 0, 0], [204, 0,\n 0], [153, 0, 0]], dtype=float)\n acccbar = cpool / 255.0\n thecmap = col.ListedColormap(acccbar, 'acccbar')\n cm.register_cmap(cmap=thecmap)\n return\n\n\nregister_cccmacms()\n",
"step-5": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 13:25:03 2020\n\n@author: Dr. Michael Sigmond, Canadian Centre for Climate Modelling and Analysis\n\"\"\"\n\n\nimport matplotlib.colors as col\n\n\nimport matplotlib.cm as cm\n\nimport numpy as np\n\n\ndef register_cccmacms(cmap='all'):\n \n \n \"\"\"create my personal colormaps with discrete colors and register them.\n \n \n default is to register all of them. can also specify which one.\n \n \n (@@ input arg cmap not implemented yet 2/27/14)\n \n \n \"\"\"\n \n \n #print 'registering cmaps'\n \n \n \n \n \n \n # define individual colors as RGB triples\n \n \n # from colorwheel.m\n \n \n # =============================================\n \n \n # kem_w20 (20) OR blue2red_w20\n \n \n # blueish at top, white in middle, reddish at bottom\n \n \n \n cpool = np.array([ [153,255,255], \\\n \n \n [204,255,229], \\\n \n \n [240,255,240],\\\n \n \n [204,255,153],\\\n \n \n [178,255,102],\\\n \n \n [216,255,76],\\\n \n \n [255,255,51],\\\n \n \n [255,220,51],\\\n \n \n [255,187,51],\\\n \n \n [255,153,51],\\\n \n \n [255,0,0],\\\n \n \n [204,0,0],\\\n \n \n [153,0,0]], \\\n \n \n dtype=float)\n \n \n \n acccbar = (cpool/255.)\n \n \n thecmap = col.ListedColormap(acccbar,'acccbar')\n \n \n cm.register_cmap(cmap=thecmap)\n\n return\n\nregister_cccmacms()\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
alias_macro = {
"class": "Application",
"method": "alias_macro",
"doc": """
Returns or modifies the macro of a command alias.
""",
"syntax": """
Rhino.AliasMacro (strAlias [, strMacro])
""",
"params": {
0: {
"name": "alias",
"optional": False,
"type_vb": "string",
"type_string": "str",
"doc": """
The name of an existing command alias.
"""
},
1: {
"name": "macro",
"optional": True,
"type_vb": "string",
"type_string": "str",
"doc": """
The new macro to run when the alias is executed.
"""
},
},
"returns": {
0: {
"type_vb": "String",
"doc": "If a new macro is not specified, the existing macro if successful."
},
1: {
"type_vb": "String",
"doc": "If a new macro is specified, the previous macro if successful."
},
2: {
"type_vb": "Null",
"doc": "If not successful, or on error."
},
}
}
|
normal
|
{
"blob_id": "1574f034ff9b6ddb785e4c54758b2057009198ed",
"index": 7587,
"step-1": "<mask token>\n",
"step-2": "alias_macro = {'class': 'Application', 'method': 'alias_macro', 'doc':\n \"\"\"\n Returns or modifies the macro of a command alias.\n \"\"\",\n 'syntax': \"\"\"\n Rhino.AliasMacro (strAlias [, strMacro])\n \"\"\",\n 'params': {(0): {'name': 'alias', 'optional': False, 'type_vb':\n 'string', 'type_string': 'str', 'doc':\n \"\"\"\n The name of an existing command alias.\n \"\"\"}, (1\n ): {'name': 'macro', 'optional': True, 'type_vb': 'string',\n 'type_string': 'str', 'doc':\n \"\"\"\n The new macro to run when the alias is executed.\n \"\"\"\n }}, 'returns': {(0): {'type_vb': 'String', 'doc':\n 'If a new macro is not specified, the existing macro if successful.'},\n (1): {'type_vb': 'String', 'doc':\n 'If a new macro is specified, the previous macro if successful.'}, (2):\n {'type_vb': 'Null', 'doc': 'If not successful, or on error.'}}}\n",
"step-3": "alias_macro = {\r\n\r\n \"class\": \"Application\",\r\n \"method\": \"alias_macro\",\r\n \"doc\": \"\"\"\r\n Returns or modifies the macro of a command alias.\r\n \"\"\",\r\n\r\n \"syntax\": \"\"\"\r\n Rhino.AliasMacro (strAlias [, strMacro])\r\n \"\"\",\r\n\r\n \"params\": {\r\n 0: {\r\n \"name\": \"alias\",\r\n \"optional\": False,\r\n \"type_vb\": \"string\",\r\n \"type_string\": \"str\",\r\n \"doc\": \"\"\"\r\n The name of an existing command alias.\r\n \"\"\"\r\n },\r\n 1: {\r\n \"name\": \"macro\",\r\n \"optional\": True,\r\n \"type_vb\": \"string\",\r\n \"type_string\": \"str\",\r\n \"doc\": \"\"\"\r\n The new macro to run when the alias is executed.\r\n \"\"\"\r\n },\r\n },\r\n\r\n \"returns\": {\r\n 0: {\r\n \"type_vb\": \"String\",\r\n \"doc\": \"If a new macro is not specified, the existing macro if successful.\"\r\n },\r\n 1: {\r\n \"type_vb\": \"String\",\r\n \"doc\": \"If a new macro is specified, the previous macro if successful.\"\r\n },\r\n 2: {\r\n \"type_vb\": \"Null\",\r\n \"doc\": \"If not successful, or on error.\"\r\n },\r\n }\r\n\r\n}\r\n\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import unittest
def is_multiple(value, base):
    """Return True when ``value`` is evenly divisible by ``base``."""
    return value % base == 0


def fizz_buzz(value):
    """Return 'Fizz'/'Buzz'/'FizzBuzz' per the classic rules, else str(value)."""
    if is_multiple(value, 3) and is_multiple(value, 5):
        return 'FizzBuzz'
    if is_multiple(value, 3):
        return 'Fizz'
    if is_multiple(value, 5):
        return 'Buzz'
    return str(value)
class FizzBuzzTest(unittest.TestCase):
    """Unit tests covering the fizz_buzz conversion rules."""

    def check_fizz_buzz(self, value, expected):
        """Shared helper: assert that fizz_buzz(value) yields expected."""
        self.assertEqual(expected, fizz_buzz(value))

    def test_fizz_buzz__fizz_buzz_1_1(self):
        self.check_fizz_buzz(1, '1')

    def test_fizz_buzz__fizz_buzz_2_2(self):
        self.check_fizz_buzz(2, '2')

    def test_fizz_buzz__fizz_buzz_3_Fizz(self):
        self.check_fizz_buzz(3, 'Fizz')

    def test_fizz_buzz__fizz_buzz_5_Buzz(self):
        self.check_fizz_buzz(5, 'Buzz')

    def test_fizz_buzz__fizz_buzz_6_Fizz(self):
        self.check_fizz_buzz(6, 'Fizz')

    def test_fizz_buzz__fizz_buzz_10_Buzz(self):
        self.check_fizz_buzz(10, 'Buzz')

    def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):
        self.check_fizz_buzz(15, 'FizzBuzz')
if __name__ == "__main__":
print("Running all unit tests...")
unittest.main()
|
normal
|
{
"blob_id": "59d543ed443c156ac65f9c806ba5bada6bcd0c21",
"index": 6891,
"step-1": "<mask token>\n\n\nclass FizzBuzzTest(unittest.TestCase):\n\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n self.assertEqual(expected, result)\n <mask token>\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, '2')\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, 'Buzz')\n <mask token>\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, 'FizzBuzz')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fizz_buzz(value):\n if is_multiple(value, 5) and is_multiple(value, 3):\n return 'FizzBuzz'\n if is_multiple(value, 3):\n return 'Fizz'\n if is_multiple(value, 5):\n return 'Buzz'\n return str(value)\n\n\nclass FizzBuzzTest(unittest.TestCase):\n\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n self.assertEqual(expected, result)\n\n def test_fizz_buzz__fizz_buzz_1_1(self):\n self.check_fizz_buzz(1, '1')\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, '2')\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_6_Fizz(self):\n self.check_fizz_buzz(6, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, 'FizzBuzz')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef is_multiple(value, base):\n return 0 == value % base\n\n\ndef fizz_buzz(value):\n if is_multiple(value, 5) and is_multiple(value, 3):\n return 'FizzBuzz'\n if is_multiple(value, 3):\n return 'Fizz'\n if is_multiple(value, 5):\n return 'Buzz'\n return str(value)\n\n\nclass FizzBuzzTest(unittest.TestCase):\n\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n self.assertEqual(expected, result)\n\n def test_fizz_buzz__fizz_buzz_1_1(self):\n self.check_fizz_buzz(1, '1')\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, '2')\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_6_Fizz(self):\n self.check_fizz_buzz(6, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, 'FizzBuzz')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef is_multiple(value, base):\n return 0 == value % base\n\n\ndef fizz_buzz(value):\n if is_multiple(value, 5) and is_multiple(value, 3):\n return 'FizzBuzz'\n if is_multiple(value, 3):\n return 'Fizz'\n if is_multiple(value, 5):\n return 'Buzz'\n return str(value)\n\n\nclass FizzBuzzTest(unittest.TestCase):\n\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n self.assertEqual(expected, result)\n\n def test_fizz_buzz__fizz_buzz_1_1(self):\n self.check_fizz_buzz(1, '1')\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, '2')\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_6_Fizz(self):\n self.check_fizz_buzz(6, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, 'FizzBuzz')\n\n\nif __name__ == '__main__':\n print('Running all unit tests...')\n unittest.main()\n",
"step-5": "import unittest\n\n\ndef is_multiple(value, base):\n return 0 == (value % base)\n\n\ndef fizz_buzz(value):\n if is_multiple(value, 5) and is_multiple(value, 3):\n return \"FizzBuzz\"\n if is_multiple(value, 3):\n return \"Fizz\"\n if is_multiple(value, 5):\n return \"Buzz\"\n return str(value)\n\n\nclass FizzBuzzTest(unittest.TestCase):\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n\n self.assertEqual(expected, result)\n\n def test_fizz_buzz__fizz_buzz_1_1(self):\n self.check_fizz_buzz(1, \"1\")\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, \"2\")\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, \"Fizz\")\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, \"Buzz\")\n\n def test_fizz_buzz__fizz_buzz_6_Fizz(self):\n self.check_fizz_buzz(6, \"Fizz\")\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, \"Buzz\")\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, \"FizzBuzz\")\n\n\nif __name__ == \"__main__\":\n print(\"Running all unit tests...\")\n unittest.main()\n",
"step-ids": [
7,
10,
11,
12,
14
]
}
|
[
7,
10,
11,
12,
14
] |
<|reserved_special_token_0|>
class Test(unittest.TestCase):
<|reserved_special_token_0|>
def tearDown(self):
pass
def test_these_should_win_for_x(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',
'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',
'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')
def test_these_should_win_for_o(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',
'-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')
def test_these_should_win_for_nobody(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',
'-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,
'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',
'-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,
'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,
'should return None')
def test_make_move(self):
self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [
'o', 'x', '-']]
self.the_board.whose_turn = 'o'
self.the_board.MakeMove([1, 1])
self.assertEqual(self.the_board.board_array[1][1], 'o',
'should be an o')
self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_algorithm_by_playing_large_num_of_random_games(self):
NUM_GAMES = 10
NUM_GAMES = 10
for i in range(0, NUM_GAMES + 1):
win_result = StartNewGame(UseRandom=True)
self.assertTrue(win_result == 'Computer' or win_result == 'Tie')
def test_print(self):
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'x', 'o', '-']]
self.the_board.PrintBoardToConsole()
def test_empty_squares(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(unittest.TestCase):
def setUp(self):
self.the_board = TicTacToe_Board()
def tearDown(self):
pass
def test_these_should_win_for_x(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',
'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',
'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')
def test_these_should_win_for_o(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',
'-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')
def test_these_should_win_for_nobody(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',
'-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,
'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',
'-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,
'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,
'should return None')
def test_make_move(self):
self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [
'o', 'x', '-']]
self.the_board.whose_turn = 'o'
self.the_board.MakeMove([1, 1])
self.assertEqual(self.the_board.board_array[1][1], 'o',
'should be an o')
self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')
<|reserved_special_token_0|>
def test_get_winning_moves_for_opponent(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [
'o', 'o', '-']]
self.the_board.whose_turn = 'x'
winning_moves = self.the_board.GetWinningMovesFor('human')
d_pr(winning_moves)
self.assertIn([0, 1], winning_moves)
self.assertIn([2, 2], winning_moves)
comp_player = ComputerPlayer('o', self.the_board)
self.the_board.human_player_x_or_o = 'x'
self.the_board.c_player_x_or_o = 'o'
self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [
'o', 'o', '-']]
self.the_board.whose_turn = 'o'
winning_moves = self.the_board.GetWinningMovesFor('human')
d_pr(winning_moves)
self.assertIn([0, 1], winning_moves)
def test_get_threatening_moves(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'o', '-', '-']]
self.the_board.whose_turn = 'x'
threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self
.the_board.GetEmptySquares())
self.assertIn([0, 0], threatening_moves)
self.assertIn([2, 2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 2)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [
'o', '-', '-']]
self.the_board.whose_turn = 'x'
threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self
.the_board.GetEmptySquares())
self.assertIn([0, 1], threatening_moves)
self.assertIn([2, 1], threatening_moves)
self.assertIn([1, 0], threatening_moves)
self.assertIn([1, 2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 4)
def test_algorithm_by_playing_large_num_of_random_games(self):
NUM_GAMES = 10
NUM_GAMES = 10
for i in range(0, NUM_GAMES + 1):
win_result = StartNewGame(UseRandom=True)
self.assertTrue(win_result == 'Computer' or win_result == 'Tie')
def test_print(self):
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'x', 'o', '-']]
self.the_board.PrintBoardToConsole()
def test_empty_squares(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(unittest.TestCase):
def setUp(self):
self.the_board = TicTacToe_Board()
def tearDown(self):
pass
def test_these_should_win_for_x(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',
'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',
'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')
def test_these_should_win_for_o(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',
'-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')
def test_these_should_win_for_nobody(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',
'-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,
'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',
'-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,
'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,
'should return None')
def test_make_move(self):
self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [
'o', 'x', '-']]
self.the_board.whose_turn = 'o'
self.the_board.MakeMove([1, 1])
self.assertEqual(self.the_board.board_array[1][1], 'o',
'should be an o')
self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')
def test_computer_player_get_outcome(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'-', '-', '-']]
self.the_board.whose_turn = 'x'
move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',
'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)
self.assertEqual(out, 'x', 'x should win: outcome should be x')
move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',
'move': [2, 1]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)
self.assertEqual(out, None, 'no one should win: outcome will be None')
move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',
'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':
'o', 'move': [2, 2]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)
self.assertEqual(out, 'o', 'o should win')
def test_get_winning_moves_for_opponent(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [
'o', 'o', '-']]
self.the_board.whose_turn = 'x'
winning_moves = self.the_board.GetWinningMovesFor('human')
d_pr(winning_moves)
self.assertIn([0, 1], winning_moves)
self.assertIn([2, 2], winning_moves)
comp_player = ComputerPlayer('o', self.the_board)
self.the_board.human_player_x_or_o = 'x'
self.the_board.c_player_x_or_o = 'o'
self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [
'o', 'o', '-']]
self.the_board.whose_turn = 'o'
winning_moves = self.the_board.GetWinningMovesFor('human')
d_pr(winning_moves)
self.assertIn([0, 1], winning_moves)
def test_get_threatening_moves(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'o', '-', '-']]
self.the_board.whose_turn = 'x'
threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self
.the_board.GetEmptySquares())
self.assertIn([0, 0], threatening_moves)
self.assertIn([2, 2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 2)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [
'o', '-', '-']]
self.the_board.whose_turn = 'x'
threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self
.the_board.GetEmptySquares())
self.assertIn([0, 1], threatening_moves)
self.assertIn([2, 1], threatening_moves)
self.assertIn([1, 0], threatening_moves)
self.assertIn([1, 2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 4)
def test_algorithm_by_playing_large_num_of_random_games(self):
NUM_GAMES = 10
NUM_GAMES = 10
for i in range(0, NUM_GAMES + 1):
win_result = StartNewGame(UseRandom=True)
self.assertTrue(win_result == 'Computer' or win_result == 'Tie')
def test_print(self):
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'x', 'o', '-']]
self.the_board.PrintBoardToConsole()
def test_empty_squares(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(unittest.TestCase):
def setUp(self):
self.the_board = TicTacToe_Board()
def tearDown(self):
pass
def test_these_should_win_for_x(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',
'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',
'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')
def test_these_should_win_for_o(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',
'-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')
def test_these_should_win_for_nobody(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',
'-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,
'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',
'-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,
'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',
'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,
'should return None')
def test_make_move(self):
self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [
'o', 'x', '-']]
self.the_board.whose_turn = 'o'
self.the_board.MakeMove([1, 1])
self.assertEqual(self.the_board.board_array[1][1], 'o',
'should be an o')
self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')
def test_computer_player_get_outcome(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'-', '-', '-']]
self.the_board.whose_turn = 'x'
move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',
'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)
self.assertEqual(out, 'x', 'x should win: outcome should be x')
move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',
'move': [2, 1]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)
self.assertEqual(out, None, 'no one should win: outcome will be None')
move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',
'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':
'o', 'move': [2, 2]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)
self.assertEqual(out, 'o', 'o should win')
def test_get_winning_moves_for_opponent(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [
'o', 'o', '-']]
self.the_board.whose_turn = 'x'
winning_moves = self.the_board.GetWinningMovesFor('human')
d_pr(winning_moves)
self.assertIn([0, 1], winning_moves)
self.assertIn([2, 2], winning_moves)
comp_player = ComputerPlayer('o', self.the_board)
self.the_board.human_player_x_or_o = 'x'
self.the_board.c_player_x_or_o = 'o'
self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [
'o', 'o', '-']]
self.the_board.whose_turn = 'o'
winning_moves = self.the_board.GetWinningMovesFor('human')
d_pr(winning_moves)
self.assertIn([0, 1], winning_moves)
def test_get_threatening_moves(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'o', '-', '-']]
self.the_board.whose_turn = 'x'
threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self
.the_board.GetEmptySquares())
self.assertIn([0, 0], threatening_moves)
self.assertIn([2, 2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 2)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [
'o', '-', '-']]
self.the_board.whose_turn = 'x'
threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self
.the_board.GetEmptySquares())
self.assertIn([0, 1], threatening_moves)
self.assertIn([2, 1], threatening_moves)
self.assertIn([1, 0], threatening_moves)
self.assertIn([1, 2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 4)
def test_algorithm_by_playing_large_num_of_random_games(self):
NUM_GAMES = 10
NUM_GAMES = 10
for i in range(0, NUM_GAMES + 1):
win_result = StartNewGame(UseRandom=True)
self.assertTrue(win_result == 'Computer' or win_result == 'Tie')
def test_print(self):
self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [
'x', 'o', '-']]
self.the_board.PrintBoardToConsole()
def test_empty_squares(self):
pass
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
'''
Created on Nov 16, 2013
@author: mo
'''
import unittest
from Board import TicTacToe_Board
from ComputerPlayer import ComputerPlayer
from utils import debug_print as d_pr
from main import StartNewGame
class Test(unittest.TestCase):
def setUp(self):
self.the_board = TicTacToe_Board()
def tearDown(self):
pass
#these may be impossible boards, but still it tests the win detector
def test_these_should_win_for_x(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', 'x'],
['o', 'x', 'o'],
['o', 'x', 'o']]), 'x', "should return x")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['x', 'o', 'o'],
['o', 'x', 'o'],
['x', 'o', 'x']
]) , 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['x', 'x', 'x'],
['-', '-', '-']
]), 'x', 'should return x'
)
def test_these_should_win_for_o(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['o', 'x', 'o'],
['o', 'x', 'x'],
['o', 'o', 'x']]), 'o', "should return o")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['x', 'o', '-'],
['o', 'o', 'o'],
['o', 'x', 'x']
]) , 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['x', 'o', 'x'],
['-', '-', 'o']
]), 'o', 'should return o'
)
def test_these_should_win_for_nobody(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', '-'],
['o', '-', 'o'],
['o', '-', 'o']]), None, "should return None")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['-', '-', '-'],
['-', '-', '-'],
['x', 'o', 'x']
]) , None, 'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['-', '-', 'x'],
['-', 'o', 'o']
]), None, 'should return None'
)
def test_make_move(self):
self.the_board.board_array=[ ['x', '-', 'x'],
['o', '-', 'o'],
['o', 'x', '-']
]
self.the_board.whose_turn='o'
self.the_board.MakeMove([1,1])
self.assertEqual(self.the_board.board_array[1][1], 'o', "should be an o")
self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')
def test_computer_player_get_outcome(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['-', '-', '-']
]
self.the_board.whose_turn = 'x'
move_seq_1 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}, {'player': 'x', 'move': [0,0]} ]
out=self.the_board.GetOutcomeOfMoveSequence(move_seq_1)
self.assertEqual(out, 'x', 'x should win: outcome should be x')
move_seq_2 = [{'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)
self.assertEqual(out, None, 'no one should win: outcome will be None')
move_seq_3 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [0,0] }, {'player': 'x', 'move' : [2,1]},
{'player': 'o', 'move' : [2,2] }
]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)
self.assertEqual(out, 'o', 'o should win')
def test_get_winning_moves_for_opponent(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['x', '-', 'x'],
['-', 'o', '-'],
['o', 'o', '-']
]
self.the_board.whose_turn = 'x'
winning_moves=self.the_board.GetWinningMovesFor( 'human')
d_pr(winning_moves)
self.assertIn([0,1], winning_moves)
self.assertIn([2,2], winning_moves)
comp_player = ComputerPlayer('o', self.the_board)
self.the_board.human_player_x_or_o = 'x'
self.the_board.c_player_x_or_o = 'o'
self.the_board.board_array = [ ['x', '-', 'x'],
['-', 'o', '-'],
['o', 'o', '-']
]
self.the_board.whose_turn = 'o'
winning_moves=self.the_board.GetWinningMovesFor( 'human')
d_pr(winning_moves)
self.assertIn([0,1], winning_moves)
def test_get_threatening_moves(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['o', '-', '-']
]
self.the_board.whose_turn = 'x'
threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())
self.assertIn([0,0], threatening_moves)
self.assertIn([2,2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 2)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'o'],
['-', 'x', '-'],
['o', '-', '-']
]
self.the_board.whose_turn = 'x'
threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())
self.assertIn([0,1], threatening_moves)
self.assertIn([2,1], threatening_moves)
self.assertIn([1,0], threatening_moves)
self.assertIn([1,2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 4)
def test_algorithm_by_playing_large_num_of_random_games(self):
NUM_GAMES = 10
#NUM_GAMES=100000 # this works but takes a long time
NUM_GAMES=10
for i in range(0, NUM_GAMES + 1):
win_result = StartNewGame(UseRandom=True)
self.assertTrue(win_result == 'Computer' or win_result == 'Tie')
def test_print(self):
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['x', 'o', '-']]
self.the_board.PrintBoardToConsole()
def test_empty_squares(self):
pass
if __name__ == "__main__":
    # Run the whole test suite when this file is executed directly.
    # (A commented-out sys.argv override for running a single test by name
    # was removed; use `python -m unittest Test.testName` instead.)
    unittest.main()
|
flexible
|
{
"blob_id": "1968923cd923e68dc5ff2148802f18e40a5e6c33",
"index": 939,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n <mask token>\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <mask token>\n <mask token>\n <mask token>\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, 
NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <mask token>\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n 
self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n 
NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n 
self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', 
'-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n 
self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', 
'-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "'''\nCreated on Nov 16, 2013\n\n@author: mo\n'''\nimport unittest\nfrom Board import TicTacToe_Board\nfrom ComputerPlayer import ComputerPlayer\nfrom utils import debug_print as d_pr\n\nfrom main import StartNewGame\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n \n def tearDown(self):\n pass\n\n #these may be impossible boards, but still it tests the win detector\n \n def test_these_should_win_for_x(self):\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', 'x'], \n ['o', 'x', 'o'], \n ['o', 'x', 'o']]), 'x', \"should return x\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['x', 'o', 'o'],\n ['o', 'x', 'o'],\n ['x', 'o', 'x']\n \n \n ]) , 'x', 'should return x')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['x', 'x', 'x'],\n ['-', '-', '-']\n ]), 'x', 'should return x'\n )\n \n \n \n def test_these_should_win_for_o(self):\n \n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['o', 'x', 'o'], \n ['o', 'x', 'x'], \n ['o', 'o', 'x']]), 'o', \"should return o\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['x', 'o', '-'],\n ['o', 'o', 'o'],\n ['o', 'x', 'x']\n \n \n ]) , 'o', 'should return o')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['x', 'o', 'x'],\n ['-', '-', 'o']\n ]), 'o', 'should return o'\n )\n \n\n\n def test_these_should_win_for_nobody(self):\n \n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', '-'], \n ['o', '-', 'o'], \n ['o', '-', 'o']]), None, \"should return None\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['-', '-', '-'],\n ['-', '-', '-'],\n ['x', 'o', 'x']\n \n \n ]) , None, 'should return None')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['-', '-', 'x'],\n ['-', 'o', 'o']\n ]), None, 'should return None'\n )\n \n def test_make_move(self):\n \n 
self.the_board.board_array=[ ['x', '-', 'x'],\n ['o', '-', 'o'],\n ['o', 'x', '-']\n ]\n \n self.the_board.whose_turn='o'\n \n self.the_board.MakeMove([1,1])\n \n self.assertEqual(self.the_board.board_array[1][1], 'o', \"should be an o\")\n \n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n \n \n\n def test_computer_player_get_outcome(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['-', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n move_seq_1 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}, {'player': 'x', 'move': [0,0]} ]\n \n out=self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n \n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n \n \n move_seq_2 = [{'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}]\n \n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n\n move_seq_3 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [0,0] }, {'player': 'x', 'move' : [2,1]},\n {'player': 'o', 'move' : [2,2] }\n ]\n \n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n \n self.assertEqual(out, 'o', 'o should win')\n \n \n def test_get_winning_moves_for_opponent(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['x', '-', 'x'],\n ['-', 'o', '-'],\n ['o', 'o', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n winning_moves=self.the_board.GetWinningMovesFor( 'human')\n \n d_pr(winning_moves)\n self.assertIn([0,1], winning_moves)\n self.assertIn([2,2], winning_moves)\n \n comp_player = ComputerPlayer('o', self.the_board)\n \n self.the_board.human_player_x_or_o = 'x'\n 
self.the_board.c_player_x_or_o = 'o'\n \n \n self.the_board.board_array = [ ['x', '-', 'x'],\n ['-', 'o', '-'],\n ['o', 'o', '-']\n ]\n self.the_board.whose_turn = 'o'\n \n winning_moves=self.the_board.GetWinningMovesFor( 'human')\n \n d_pr(winning_moves)\n self.assertIn([0,1], winning_moves)\n \n \n \n def test_get_threatening_moves(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['o', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())\n \n \n self.assertIn([0,0], threatening_moves)\n self.assertIn([2,2], threatening_moves)\n \n d_pr('threats without traps: ' + str(threatening_moves))\n \n self.assertEqual(len(threatening_moves), 2)\n \n \n \n \n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'o'],\n ['-', 'x', '-'],\n ['o', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())\n \n \n self.assertIn([0,1], threatening_moves)\n self.assertIn([2,1], threatening_moves)\n self.assertIn([1,0], threatening_moves)\n self.assertIn([1,2], threatening_moves)\n \n \n \n d_pr('threats without traps: ' + str(threatening_moves))\n \n self.assertEqual(len(threatening_moves), 4)\n \n \n \n \n def test_algorithm_by_playing_large_num_of_random_games(self):\n \n NUM_GAMES = 10\n #NUM_GAMES=100000 # this works but takes a long time\n NUM_GAMES=10\n \n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n \n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n \n \n def test_print(self):\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['x', 'o', '-']]\n \n 
self.the_board.PrintBoardToConsole()\n \n \n def test_empty_squares(self):\n pass\n \n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n",
"step-ids": [
9,
12,
13,
14,
16
]
}
|
[
9,
12,
13,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def bfs(start_nodes, g):
    """Multi-source BFS flood fill over the n x n grid `g` (in place).

    Every start cell is marked -1 and distances are encoded as negative
    numbers: a cell reached after d steps ends up holding -(d + 1).  Only
    cells equal to 0 are expanded, so any nonzero cell acts as a wall.

    Relies on module-level globals: n (grid size) and dy/dx (the four
    neighbour offsets).

    Returns -1 if some cell is still 0 afterwards (unreachable), otherwise
    the number of steps needed to reach the farthest cell.
    """
    dq = deque()
    dq.extend(start_nodes)
    # Mark every source in place so it is never revisited.
    for i, j in start_nodes:
        g[i][j] = -1
    while dq:
        y, x = dq.popleft()
        for k in range(4):
            b, a = dy[k] + y, dx[k] + x
            if 0 <= b < n and 0 <= a < n and g[b][a] == 0:
                # One step farther from a source: one less than the parent.
                g[b][a] = g[y][x] - 1
                dq.append((b, a))
    # Scan for unreached cells and for the most negative (farthest) value.
    mm = 25000
    for i in range(n):
        for j in range(n):
            if g[i][j] == 0:
                return -1
            mm = min(g[i][j], mm)
    # Sources hold -1 (distance 0), so distance = -value - 1.
    return -mm - 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(n):
for j in range(n):
if graph[i][j] == 2:
graph[i][j] = 0
virus_lst.append((i, j))
<|reserved_special_token_0|>
def bfs(start_nodes, g):
    """Multi-source BFS flood fill over the n x n grid `g` (in place).

    Every start cell is marked -1 and distances are encoded as negative
    numbers: a cell reached after d steps ends up holding -(d + 1).  Only
    cells equal to 0 are expanded, so any nonzero cell acts as a wall.

    Relies on module-level globals: n (grid size) and dy/dx (the four
    neighbour offsets).

    Returns -1 if some cell is still 0 afterwards (unreachable), otherwise
    the number of steps needed to reach the farthest cell.
    """
    dq = deque()
    dq.extend(start_nodes)
    # Mark every source in place so it is never revisited.
    for i, j in start_nodes:
        g[i][j] = -1
    while dq:
        y, x = dq.popleft()
        for k in range(4):
            b, a = dy[k] + y, dx[k] + x
            if 0 <= b < n and 0 <= a < n and g[b][a] == 0:
                # One step farther from a source: one less than the parent.
                g[b][a] = g[y][x] - 1
                dq.append((b, a))
    # Scan for unreached cells and for the most negative (farthest) value.
    mm = 25000
    for i in range(n):
        for j in range(n):
            if g[i][j] == 0:
                return -1
            mm = min(g[i][j], mm)
    # Sources hold -1 (distance 0), so distance = -value - 1.
    return -mm - 1
<|reserved_special_token_0|>
for comb in combs:
result.append(bfs(comb, deepcopy(graph)))
<|reserved_special_token_0|>
for r in result:
if r != -1:
time = min(time, r)
flag = True
print(time if flag else -1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
input = sys.stdin.readline
<|reserved_special_token_0|>
n, m = map(int, input().split())
graph = [list(map(int, input().split())) for i in range(n)]
virus_lst = []
for i in range(n):
for j in range(n):
if graph[i][j] == 2:
graph[i][j] = 0
virus_lst.append((i, j))
combs = combinations(virus_lst, m)
dy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]
def bfs(start_nodes, g):
    """Multi-source BFS flood fill over the n x n grid `g` (in place).

    Every start cell is marked -1 and distances are encoded as negative
    numbers: a cell reached after d steps ends up holding -(d + 1).  Only
    cells equal to 0 are expanded, so any nonzero cell acts as a wall.

    Relies on module-level globals: n (grid size) and dy/dx (the four
    neighbour offsets).

    Returns -1 if some cell is still 0 afterwards (unreachable), otherwise
    the number of steps needed to reach the farthest cell.
    """
    dq = deque()
    dq.extend(start_nodes)
    # Mark every source in place so it is never revisited.
    for i, j in start_nodes:
        g[i][j] = -1
    while dq:
        y, x = dq.popleft()
        for k in range(4):
            b, a = dy[k] + y, dx[k] + x
            if 0 <= b < n and 0 <= a < n and g[b][a] == 0:
                # One step farther from a source: one less than the parent.
                g[b][a] = g[y][x] - 1
                dq.append((b, a))
    # Scan for unreached cells and for the most negative (farthest) value.
    mm = 25000
    for i in range(n):
        for j in range(n):
            if g[i][j] == 0:
                return -1
            mm = min(g[i][j], mm)
    # Sources hold -1 (distance 0), so distance = -value - 1.
    return -mm - 1
result = []
for comb in combs:
result.append(bfs(comb, deepcopy(graph)))
flag = False
time = 25000
for r in result:
if r != -1:
time = min(time, r)
flag = True
print(time if flag else -1)
<|reserved_special_token_1|>
# Virus-spread search: choose m of the candidate virus cells to activate,
# flood-fill by BFS, and print the minimum time needed to infect every empty
# cell over all choices (or -1 if no choice covers the whole grid).
import sys
from collections import deque
from copy import deepcopy
from itertools import combinations

input = sys.stdin.readline  # faster reads for competitive-programming input

n, m = map(int, input().split())
graph = [list(map(int, input().split())) for _ in range(n)]

# Candidate virus cells (value 2) are treated as empty (0) until activated.
virus_lst = []
for i in range(n):
    for j in range(n):
        if graph[i][j] == 2:
            graph[i][j] = 0
            virus_lst.append((i, j))

dy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]  # 4-neighbour offsets


def bfs(start_nodes, g):
    """Flood-fill `g` from `start_nodes`; return spread time or -1.

    Distances are encoded in place as negative numbers (source == -1, a cell
    reached after d steps holds -(d + 1)); only 0-cells are expanded, so any
    nonzero cell acts as a wall.
    """
    dq = deque(start_nodes)
    for i, j in start_nodes:
        g[i][j] = -1
    while dq:
        y, x = dq.popleft()
        for k in range(4):
            b, a = y + dy[k], x + dx[k]
            if 0 <= b < n and 0 <= a < n and g[b][a] == 0:
                g[b][a] = g[y][x] - 1
                dq.append((b, a))
    farthest = 0  # most negative value seen == farthest infected cell
    for row in g:
        for cell in row:
            if cell == 0:
                return -1  # some empty cell was never reached
            farthest = min(farthest, cell)
    return -farthest - 1


# Stream the minimum over all m-subsets instead of materialising the full
# result list first (the original collected every outcome and re-scanned it).
best = -1
for comb in combinations(virus_lst, m):
    spread_time = bfs(comb, deepcopy(graph))
    if spread_time != -1 and (best == -1 or spread_time < best):
        best = spread_time
print(best)
|
flexible
|
{
"blob_id": "0e3bf0ddd654b92b2cd962a2f3935c639eeb0695",
"index": 2155,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef bfs(start_nodes, g):\n dq = deque()\n dq.extend(start_nodes)\n for i, j in start_nodes:\n g[i][j] = -1\n while dq:\n y, x = dq.popleft()\n for k in range(4):\n b, a = dy[k] + y, dx[k] + x\n if 0 <= b < n and 0 <= a < n and g[b][a] == 0:\n g[b][a] = g[y][x] - 1\n dq.append((b, a))\n mm = 25000\n for i in range(n):\n for j in range(n):\n if g[i][j] == 0:\n return -1\n mm = min(g[i][j], mm)\n return -mm - 1\n\n\n<mask token>\n",
"step-3": "<mask token>\nfor i in range(n):\n for j in range(n):\n if graph[i][j] == 2:\n graph[i][j] = 0\n virus_lst.append((i, j))\n<mask token>\n\n\ndef bfs(start_nodes, g):\n dq = deque()\n dq.extend(start_nodes)\n for i, j in start_nodes:\n g[i][j] = -1\n while dq:\n y, x = dq.popleft()\n for k in range(4):\n b, a = dy[k] + y, dx[k] + x\n if 0 <= b < n and 0 <= a < n and g[b][a] == 0:\n g[b][a] = g[y][x] - 1\n dq.append((b, a))\n mm = 25000\n for i in range(n):\n for j in range(n):\n if g[i][j] == 0:\n return -1\n mm = min(g[i][j], mm)\n return -mm - 1\n\n\n<mask token>\nfor comb in combs:\n result.append(bfs(comb, deepcopy(graph)))\n<mask token>\nfor r in result:\n if r != -1:\n time = min(time, r)\n flag = True\nprint(time if flag else -1)\n",
"step-4": "<mask token>\ninput = sys.stdin.readline\n<mask token>\nn, m = map(int, input().split())\ngraph = [list(map(int, input().split())) for i in range(n)]\nvirus_lst = []\nfor i in range(n):\n for j in range(n):\n if graph[i][j] == 2:\n graph[i][j] = 0\n virus_lst.append((i, j))\ncombs = combinations(virus_lst, m)\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\n\ndef bfs(start_nodes, g):\n dq = deque()\n dq.extend(start_nodes)\n for i, j in start_nodes:\n g[i][j] = -1\n while dq:\n y, x = dq.popleft()\n for k in range(4):\n b, a = dy[k] + y, dx[k] + x\n if 0 <= b < n and 0 <= a < n and g[b][a] == 0:\n g[b][a] = g[y][x] - 1\n dq.append((b, a))\n mm = 25000\n for i in range(n):\n for j in range(n):\n if g[i][j] == 0:\n return -1\n mm = min(g[i][j], mm)\n return -mm - 1\n\n\nresult = []\nfor comb in combs:\n result.append(bfs(comb, deepcopy(graph)))\nflag = False\ntime = 25000\nfor r in result:\n if r != -1:\n time = min(time, r)\n flag = True\nprint(time if flag else -1)\n",
"step-5": "import sys; input = sys.stdin.readline\nfrom collections import deque\nfrom itertools import combinations\nfrom copy import deepcopy\n\nn, m = map(int, input().split())\ngraph = [list(map(int,input().split())) for i in range(n)]\nvirus_lst = []\nfor i in range(n):\n for j in range(n):\n if graph[i][j]==2:\n graph[i][j] = 0\n virus_lst.append((i, j))\n\ncombs = combinations(virus_lst, m)\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\ndef bfs(start_nodes, g):\n dq = deque()\n dq.extend(start_nodes)\n for i, j in start_nodes:\n g[i][j] = -1\n\n while dq:\n y, x = dq.popleft()\n for k in range(4):\n b, a = dy[k]+y, dx[k]+x\n if 0<=b<n and 0<=a<n and g[b][a]==0:\n g[b][a] = g[y][x] - 1\n dq.append((b,a))\n mm = 25000\n for i in range(n):\n for j in range(n):\n if g[i][j]==0:\n return -1\n mm = min(g[i][j], mm)\n\n return -mm-1\n\n\nresult = []\nfor comb in combs:\n result.append(bfs(comb, deepcopy(graph)))\nflag = False\ntime = 25000\nfor r in result:\n if r!=-1:\n time = min(time, r)\n flag = True\nprint(time if flag else -1)",
"step-ids": [
0,
1,
2,
3,
5
]
}
|
[
0,
1,
2,
3,
5
] |
# Takes a contact name (must be saved in the phone's contact list), a message,
# and a time of day, then sends the message to that contact at that time via
# WhatsApp Web.  Accuracy ~ seconds (also depends on your network speed).
import time
from datetime import datetime, timedelta
from threading import Timer

from selenium import webdriver
from selenium.webdriver.common.keys import Keys

# Raw string so the backslashes in the Windows path are not escape sequences.
PATH = r'C:\Program Files (x86)\chromedriver.exe'
driver = webdriver.Chrome(PATH)

driver.get("https://web.whatsapp.com/")
print("Scan the QR code to Log in...")
time.sleep(10)  # give the user time to scan the QR code

nameofcontact = input('Give name of contact: ')
msg = input("Type the message you want to send: ")
print("Enter Time of sending Message (Hrs, Min & Sec...)")
hrs = int(input("Hrs: "))
mins = int(input("Min: "))
secs = int(input("Sec: "))

# Fire at the next occurrence of the requested wall-clock time.  The original
# used x.replace(day=x.day + 1, ...), which raises ValueError on the last day
# of a month; timedelta arithmetic rolls over month/year boundaries safely.
now = datetime.today()
target = now.replace(hour=hrs, minute=mins, second=secs, microsecond=0)
if target < now:
    target += timedelta(days=1)
delay = (target - now).total_seconds() + 1


def send_msg():
    """Open the chat with the saved contact and send the queued message."""
    global nameofcontact, msg
    css_path = 'span[title="' + nameofcontact + '"]'
    nameofcontact = driver.find_element_by_css_selector(css_path)
    nameofcontact.click()
    chatbox = driver.find_element_by_xpath('//*[@id="main"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')
    chatbox.send_keys(msg)
    chatbox.send_keys(Keys.RETURN)


t = Timer(delay, send_msg)
t.start()
|
normal
|
{
"blob_id": "1811c0c5aca9d209638e2221cad2c30e80ee5199",
"index": 3116,
"step-1": "<mask token>\n\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n chatbox = driver.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\n\n<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://web.whatsapp.com/')\nprint('Scan the QR code to Log in...')\ntime.sleep(10)\n<mask token>\nprint('Enter Time of sending Message (Hrs, Min & Sec...)')\n<mask token>\n\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n chatbox = driver.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\n\n<mask token>\nt.start()\n",
"step-3": "<mask token>\nPATH = 'C:\\\\Program Files (x86)\\\\chromedriver.exe'\ndriver = webdriver.Chrome(PATH)\n<mask token>\ndriver.get('https://web.whatsapp.com/')\nprint('Scan the QR code to Log in...')\ntime.sleep(10)\nnameofcontact = input('Give name of contact: ')\nmsg = input('Type the message you want to send: ')\nprint('Enter Time of sending Message (Hrs, Min & Sec...)')\nhrs = int(input('Hrs: '))\nmins = int(input('Min: '))\nsecs = int(input('Sec: '))\nx = datetime.today()\ny = x.replace(day=x.day + 1, hour=hrs, minute=mins, second=secs, microsecond=0)\ndelta_t = y - x\nsecs = delta_t.seconds + 1\n\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n chatbox = driver.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\n\nt = Timer(secs, send_msg)\nt.start()\n",
"step-4": "from selenium import webdriver\nPATH = 'C:\\\\Program Files (x86)\\\\chromedriver.exe'\ndriver = webdriver.Chrome(PATH)\nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom threading import Timer\nfrom datetime import datetime\ndriver.get('https://web.whatsapp.com/')\nprint('Scan the QR code to Log in...')\ntime.sleep(10)\nnameofcontact = input('Give name of contact: ')\nmsg = input('Type the message you want to send: ')\nprint('Enter Time of sending Message (Hrs, Min & Sec...)')\nhrs = int(input('Hrs: '))\nmins = int(input('Min: '))\nsecs = int(input('Sec: '))\nx = datetime.today()\ny = x.replace(day=x.day + 1, hour=hrs, minute=mins, second=secs, microsecond=0)\ndelta_t = y - x\nsecs = delta_t.seconds + 1\n\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n chatbox = driver.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\n\nt = Timer(secs, send_msg)\nt.start()\n",
"step-5": "#Takes - Contact Name(Must be saved in phone's contact list), Message, Time as input\n# and sends message to the given contact at given time\n# Accuracy Level ~ Seconds. (Also depends on your network speed)\n\nfrom selenium import webdriver\nPATH = 'C:\\Program Files (x86)\\chromedriver.exe'\ndriver = webdriver.Chrome(PATH)\nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom threading import Timer\nfrom datetime import datetime\n\ndriver.get(\"https://web.whatsapp.com/\")\nprint(\"Scan the QR code to Log in...\")\ntime.sleep(10)\n\nnameofcontact = input('Give name of contact: ')\nmsg = input(\"Type the message you want to send: \")\nprint(\"Enter Time of sending Message (Hrs, Min & Sec...)\")\nhrs = int(input(\"Hrs: \"))\nmins = int(input(\"Min: \"))\nsecs = int(input(\"Sec: \"))\n\n\nx=datetime.today()\ny=x.replace(day=x.day+1, hour=hrs, minute=mins, second=secs, microsecond=0)\ndelta_t=y-x\n\nsecs=delta_t.seconds+1\n\ndef send_msg():\n global nameofcontact, msg\n css_path = 'span[title=\"' + nameofcontact + '\"]'\n nameofcontact = driver.find_element_by_css_selector(css_path)\n nameofcontact.click()\n\n chatbox = driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div/div/div[2]/div[1]/div/div[2]')\n chatbox.send_keys(msg)\n chatbox.send_keys(Keys.RETURN)\n\nt = Timer(secs, send_msg)\nt.start()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class VolttronWebRPC(object):
def __init__(self, url, username='admin', password='admin'):
"""
:param url: Jsonrpc endpoint for posting data.
:param username:
:param password:
"""
self._url = url
self._username = username
self._password = password
self._auth_token = None
self._auth_token = self.get_auth_token()
def do_rpc(self, method, **params):
"""
Generic method to request data from Volttron Central
:param method: Method to call
:param params: Any method specific keyword arguments
"""
data = {'jsonrpc': '2.0', 'method': method, 'params': params,
'authorization': self._auth_token, 'id': '1'}
r = requests.post(self._url, json=data)
validate_response(r)
return r.json()['result']
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def list_platforms(self):
"""
Get a list of registered platforms from Volttron Central.
"""
return self.do_rpc('list_platforms')
def install_agent(self, platform_uuid, fileargs):
"""
Install an agent on a platform
:param platform_uuid: uuid of platform where agent will be installed
:param fileargs: arguments for installing the agent
"""
rpc = 'platforms.uuid.{}.install'.format(platform_uuid)
return self.do_rpc(rpc, files=[fileargs])
def list_agents(self, platform_uuid):
"""
List agents installed on a platform
"""
return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')
def unregister_platform(self, platform_uuid):
"""
Unregister a platform with Volttron Central
"""
return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)
def store_agent_config(self, platform_uuid, agent_identity, config_name,
raw_contents, config_type='json'):
"""
Add a file to the an agent's config store
:param platform_uuid: uuid of platform where agent will is installed
:param agent_identity: VIP identity of agent that will own the config
:param config_name: name of the configuration file
:param raw_contents: file data
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity, config_name=config_name, raw_contents=
raw_contents, config_type=config_type)
return self.do_rpc('store_agent_config', **params)
def list_agent_configs(self, platform_uuid, agent_identity):
"""
List the configuration files stored for an agent.
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the configs
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity)
return self.do_rpc('list_agent_configs', **params)
def get_agent_config(self, platform_uuid, agent_identity, config_name,
raw=True):
"""
Get a config file from an agent's Configuration Store
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the config
:param config_name: name of the configuration file
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity, config_name=config_name, raw=raw)
return self.do_rpc('get_agent_config', **params)
<|reserved_special_token_0|>
def get_setting(self, setting):
"""
Get the value of a setting in Volttron Central
:param setting: Name of the setting to get
"""
return self.do_rpc('get_setting', key=key)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VolttronWebRPC(object):
def __init__(self, url, username='admin', password='admin'):
"""
:param url: Jsonrpc endpoint for posting data.
:param username:
:param password:
"""
self._url = url
self._username = username
self._password = password
self._auth_token = None
self._auth_token = self.get_auth_token()
def do_rpc(self, method, **params):
"""
Generic method to request data from Volttron Central
:param method: Method to call
:param params: Any method specific keyword arguments
"""
data = {'jsonrpc': '2.0', 'method': method, 'params': params,
'authorization': self._auth_token, 'id': '1'}
r = requests.post(self._url, json=data)
validate_response(r)
return r.json()['result']
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def list_platforms(self):
"""
Get a list of registered platforms from Volttron Central.
"""
return self.do_rpc('list_platforms')
def install_agent(self, platform_uuid, fileargs):
"""
Install an agent on a platform
:param platform_uuid: uuid of platform where agent will be installed
:param fileargs: arguments for installing the agent
"""
rpc = 'platforms.uuid.{}.install'.format(platform_uuid)
return self.do_rpc(rpc, files=[fileargs])
def list_agents(self, platform_uuid):
"""
List agents installed on a platform
"""
return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')
def unregister_platform(self, platform_uuid):
"""
Unregister a platform with Volttron Central
"""
return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)
def store_agent_config(self, platform_uuid, agent_identity, config_name,
raw_contents, config_type='json'):
"""
Add a file to the an agent's config store
:param platform_uuid: uuid of platform where agent will is installed
:param agent_identity: VIP identity of agent that will own the config
:param config_name: name of the configuration file
:param raw_contents: file data
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity, config_name=config_name, raw_contents=
raw_contents, config_type=config_type)
return self.do_rpc('store_agent_config', **params)
def list_agent_configs(self, platform_uuid, agent_identity):
"""
List the configuration files stored for an agent.
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the configs
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity)
return self.do_rpc('list_agent_configs', **params)
def get_agent_config(self, platform_uuid, agent_identity, config_name,
raw=True):
"""
Get a config file from an agent's Configuration Store
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the config
:param config_name: name of the configuration file
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity, config_name=config_name, raw=raw)
return self.do_rpc('get_agent_config', **params)
<|reserved_special_token_0|>
def get_setting(self, setting):
"""
Get the value of a setting in Volttron Central
:param setting: Name of the setting to get
"""
return self.do_rpc('get_setting', key=key)
def get_setting_keys(self):
"""
Get a list of settings in Volttorn Central
"""
return self.do_rpc('get_setting_keys')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VolttronWebRPC(object):
def __init__(self, url, username='admin', password='admin'):
"""
:param url: Jsonrpc endpoint for posting data.
:param username:
:param password:
"""
self._url = url
self._username = username
self._password = password
self._auth_token = None
self._auth_token = self.get_auth_token()
def do_rpc(self, method, **params):
"""
Generic method to request data from Volttron Central
:param method: Method to call
:param params: Any method specific keyword arguments
"""
data = {'jsonrpc': '2.0', 'method': method, 'params': params,
'authorization': self._auth_token, 'id': '1'}
r = requests.post(self._url, json=data)
validate_response(r)
return r.json()['result']
def get_auth_token(self):
"""
Get an authorization token from Volttron Central,
automatically called when the object is created
"""
return self.do_rpc('get_authorization', username=self._username,
password=self._password)
<|reserved_special_token_0|>
def list_platforms(self):
"""
Get a list of registered platforms from Volttron Central.
"""
return self.do_rpc('list_platforms')
def install_agent(self, platform_uuid, fileargs):
"""
Install an agent on a platform
:param platform_uuid: uuid of platform where agent will be installed
:param fileargs: arguments for installing the agent
"""
rpc = 'platforms.uuid.{}.install'.format(platform_uuid)
return self.do_rpc(rpc, files=[fileargs])
def list_agents(self, platform_uuid):
"""
List agents installed on a platform
"""
return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')
def unregister_platform(self, platform_uuid):
"""
Unregister a platform with Volttron Central
"""
return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)
def store_agent_config(self, platform_uuid, agent_identity, config_name,
raw_contents, config_type='json'):
"""
Add a file to the an agent's config store
:param platform_uuid: uuid of platform where agent will is installed
:param agent_identity: VIP identity of agent that will own the config
:param config_name: name of the configuration file
:param raw_contents: file data
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity, config_name=config_name, raw_contents=
raw_contents, config_type=config_type)
return self.do_rpc('store_agent_config', **params)
def list_agent_configs(self, platform_uuid, agent_identity):
"""
List the configuration files stored for an agent.
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the configs
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity)
return self.do_rpc('list_agent_configs', **params)
def get_agent_config(self, platform_uuid, agent_identity, config_name,
raw=True):
"""
Get a config file from an agent's Configuration Store
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the config
:param config_name: name of the configuration file
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity, config_name=config_name, raw=raw)
return self.do_rpc('get_agent_config', **params)
def set_setting(self, setting, value):
"""
Assign a value to a setting in Volttron Central
:param setting: Name of the setting to set
:param value: Value to assign to setting
"""
return self.do_rpc('set_setting', key=key, value=value)
def get_setting(self, setting):
"""
Get the value of a setting in Volttron Central
:param setting: Name of the setting to get
"""
return self.do_rpc('get_setting', key=key)
def get_setting_keys(self):
"""
Get a list of settings in Volttorn Central
"""
return self.do_rpc('get_setting_keys')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VolttronWebRPC(object):
def __init__(self, url, username='admin', password='admin'):
"""
:param url: Jsonrpc endpoint for posting data.
:param username:
:param password:
"""
self._url = url
self._username = username
self._password = password
self._auth_token = None
self._auth_token = self.get_auth_token()
def do_rpc(self, method, **params):
"""
Generic method to request data from Volttron Central
:param method: Method to call
:param params: Any method specific keyword arguments
"""
data = {'jsonrpc': '2.0', 'method': method, 'params': params,
'authorization': self._auth_token, 'id': '1'}
r = requests.post(self._url, json=data)
validate_response(r)
return r.json()['result']
def get_auth_token(self):
"""
Get an authorization token from Volttron Central,
automatically called when the object is created
"""
return self.do_rpc('get_authorization', username=self._username,
password=self._password)
def register_instance(self, addr, name=None):
"""
Register a platform with Volttron Central
:param addr: Platform's discovery address that will be registered
"""
return self.do_rpc('register_instance', discovery_address=addr,
display_name=name)
def list_platforms(self):
"""
Get a list of registered platforms from Volttron Central.
"""
return self.do_rpc('list_platforms')
def install_agent(self, platform_uuid, fileargs):
"""
Install an agent on a platform
:param platform_uuid: uuid of platform where agent will be installed
:param fileargs: arguments for installing the agent
"""
rpc = 'platforms.uuid.{}.install'.format(platform_uuid)
return self.do_rpc(rpc, files=[fileargs])
def list_agents(self, platform_uuid):
"""
List agents installed on a platform
"""
return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')
def unregister_platform(self, platform_uuid):
"""
Unregister a platform with Volttron Central
"""
return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)
def store_agent_config(self, platform_uuid, agent_identity, config_name,
raw_contents, config_type='json'):
"""
Add a file to the an agent's config store
:param platform_uuid: uuid of platform where agent will is installed
:param agent_identity: VIP identity of agent that will own the config
:param config_name: name of the configuration file
:param raw_contents: file data
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity, config_name=config_name, raw_contents=
raw_contents, config_type=config_type)
return self.do_rpc('store_agent_config', **params)
def list_agent_configs(self, platform_uuid, agent_identity):
"""
List the configuration files stored for an agent.
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the configs
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity)
return self.do_rpc('list_agent_configs', **params)
def get_agent_config(self, platform_uuid, agent_identity, config_name,
raw=True):
"""
Get a config file from an agent's Configuration Store
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the config
:param config_name: name of the configuration file
"""
params = dict(platform_uuid=platform_uuid, agent_identity=
agent_identity, config_name=config_name, raw=raw)
return self.do_rpc('get_agent_config', **params)
def set_setting(self, setting, value):
"""
Assign a value to a setting in Volttron Central
:param setting: Name of the setting to set
:param value: Value to assign to setting
"""
return self.do_rpc('set_setting', key=key, value=value)
def get_setting(self, setting):
"""
Get the value of a setting in Volttron Central
:param setting: Name of the setting to get
"""
return self.do_rpc('get_setting', key=key)
def get_setting_keys(self):
"""
Get a list of settings in Volttorn Central
"""
return self.do_rpc('get_setting_keys')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import requests
"""
This example exposes the VOLTTRON web API
through a python class that that does not depend
on VOLTTRON proper. A VOLTTRON Central Agent must
be running on the url passed to the constructor.
"""
class VolttronWebRPC(object):
def __init__(self, url, username='admin', password='admin'):
"""
:param url: Jsonrpc endpoint for posting data.
:param username:
:param password:
"""
self._url = url
self._username = username
self._password = password
self._auth_token = None
self._auth_token = self.get_auth_token()
def do_rpc(self, method, **params):
"""
Generic method to request data from Volttron Central
:param method: Method to call
:param params: Any method specific keyword arguments
"""
data = {
'jsonrpc': '2.0',
'method': method,
'params': params,
'authorization': self._auth_token,
'id': '1'
}
r = requests.post(self._url, json=data)
validate_response(r)
return r.json()['result']
def get_auth_token(self):
"""
Get an authorization token from Volttron Central,
automatically called when the object is created
"""
return self.do_rpc('get_authorization',
username=self._username,
password=self._password)
def register_instance(self, addr, name=None):
"""
Register a platform with Volttron Central
:param addr: Platform's discovery address that will be registered
"""
return self.do_rpc('register_instance',discovery_address=addr,
display_name=name)
def list_platforms(self):
"""
Get a list of registered platforms from Volttron Central.
"""
return self.do_rpc('list_platforms')
def install_agent(self, platform_uuid, fileargs):
"""
Install an agent on a platform
:param platform_uuid: uuid of platform where agent will be installed
:param fileargs: arguments for installing the agent
"""
rpc = 'platforms.uuid.{}.install'.format(platform_uuid)
return self.do_rpc(rpc, files=[fileargs])
def list_agents(self, platform_uuid):
"""
List agents installed on a platform
"""
return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')
def unregister_platform(self, platform_uuid):
"""
Unregister a platform with Volttron Central
"""
return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)
def store_agent_config(self, platform_uuid, agent_identity, config_name,
raw_contents, config_type="json"):
"""
Add a file to the an agent's config store
:param platform_uuid: uuid of platform where agent will is installed
:param agent_identity: VIP identity of agent that will own the config
:param config_name: name of the configuration file
:param raw_contents: file data
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity,
config_name=config_name,
raw_contents=raw_contents,
config_type=config_type)
return self.do_rpc("store_agent_config", **params)
def list_agent_configs(self, platform_uuid, agent_identity):
"""
List the configuration files stored for an agent.
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the configs
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity)
return self.do_rpc("list_agent_configs", **params)
def get_agent_config(self, platform_uuid, agent_identity, config_name,
raw=True):
"""
Get a config file from an agent's Configuration Store
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the config
:param config_name: name of the configuration file
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity,
config_name=config_name,
raw=raw)
return self.do_rpc("get_agent_config", **params)
def set_setting(self, setting, value):
"""
Assign a value to a setting in Volttron Central
:param setting: Name of the setting to set
:param value: Value to assign to setting
"""
return self.do_rpc("set_setting", key=key, value=value)
def get_setting(self, setting):
"""
Get the value of a setting in Volttron Central
:param setting: Name of the setting to get
"""
return self.do_rpc("get_setting", key=key)
def get_setting_keys(self):
"""
Get a list of settings in Volttorn Central
"""
return self.do_rpc("get_setting_keys")
def validate_response(response):
    """
    Validate that *response* carries a well-formed json-rpc 2.0 reply.

    :param response: HTTP response object exposing ``ok`` and ``json()``
    :raises AssertionError: if the reply is not a valid json-rpc response
    """
    assert response.ok
    payload = response.json()
    # A json-rpc 2.0 reply must declare its version, echo an id, and
    # carry either a result or an error member.
    assert payload['jsonrpc'] == '2.0'
    assert payload['id']
    assert 'result' in payload or 'error' in payload
|
flexible
|
{
"blob_id": "6fdfcbcfdf2b680a1fbdb74f77fd5d1a9f7eac0b",
"index": 6105,
"step-1": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n <mask token>\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n <mask token>\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n <mask token>\n\n\n<mask 
token>\n",
"step-2": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n <mask token>\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n <mask token>\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def 
get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization', username=self._username,\n password=self._password)\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the setting to set\n :param value: Value to assign to setting\n \"\"\"\n return self.do_rpc('set_setting', 
key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization', username=self._username,\n password=self._password)\n\n def register_instance(self, addr, name=None):\n \"\"\"\n Register a platform with Volttron Central\n\n :param addr: Platform's discovery address that will be registered\n \"\"\"\n return self.do_rpc('register_instance', discovery_address=addr,\n display_name=name)\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the setting to set\n :param value: Value to assign to setting\n \"\"\"\n return self.do_rpc('set_setting', 
key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-5": "# -*- coding: utf-8 -*- {{{\n# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:\n\n# Copyright (c) 2017, Battelle Memorial Institute\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# The views and conclusions contained in the software and documentation\n# are those of the authors and should not be interpreted as representing\n# official policies, either expressed or implied, of the FreeBSD\n# Project.\n#\n# This material was prepared as an account of work sponsored by an\n# agency of the United States Government. 
Neither the United States\n# Government nor the United States Department of Energy, nor Battelle,\n# nor any of their employees, nor any jurisdiction or organization that\n# has cooperated in the development of these materials, makes any\n# warranty, express or implied, or assumes any legal liability or\n# responsibility for the accuracy, completeness, or usefulness or any\n# information, apparatus, product, software, or process disclosed, or\n# represents that its use would not infringe privately owned rights.\n#\n# Reference herein to any specific commercial product, process, or\n# service by trade name, trademark, manufacturer, or otherwise does not\n# necessarily constitute or imply its endorsement, recommendation, or\n# favoring by the United States Government or any agency thereof, or\n# Battelle Memorial Institute. The views and opinions of authors\n# expressed herein do not necessarily state or reflect those of the\n# United States Government or any agency thereof.\n#\n# PACIFIC NORTHWEST NATIONAL LABORATORY\n# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY\n# under Contract DE-AC05-76RL01830\n\n# }}}\n\nimport requests\n\n\"\"\"\nThis example exposes the VOLTTRON web API\nthrough a python class that that does not depend\non VOLTTRON proper. 
A VOLTTRON Central Agent must\nbe running on the url passed to the constructor.\n\"\"\"\n\nclass VolttronWebRPC(object):\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'authorization': self._auth_token,\n 'id': '1'\n }\n\n r = requests.post(self._url, json=data)\n validate_response(r)\n\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization',\n username=self._username,\n password=self._password)\n\n def register_instance(self, addr, name=None):\n \"\"\"\n Register a platform with Volttron Central\n\n :param addr: Platform's discovery address that will be registered\n \"\"\"\n return self.do_rpc('register_instance',discovery_address=addr,\n display_name=name)\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type=\"json\"):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity,\n config_name=config_name,\n raw_contents=raw_contents,\n config_type=config_type)\n return self.do_rpc(\"store_agent_config\", **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity)\n return self.do_rpc(\"list_agent_configs\", **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity,\n config_name=config_name,\n raw=raw)\n return self.do_rpc(\"get_agent_config\", **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the setting to set\n :param value: Value to assign to setting\n \"\"\"\n return 
self.do_rpc(\"set_setting\", key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc(\"get_setting\", key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc(\"get_setting_keys\")\n\n\ndef validate_response(response):\n \"\"\"\n Validate that the message is a json-rpc response.\n\n :param response:\n :return:\n \"\"\"\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()\n",
"step-ids": [
11,
12,
14,
15,
18
]
}
|
[
11,
12,
14,
15,
18
] |
"""
Given two strings A and B of lowercase letters, return true
if and only if we can swap two letters in A so that the result
equals B.
Example 1:
Input: A = "ab", B = "ba"
Output: true
"""
class Solution:
    def buddyStrings(self, A: str, B: str) -> bool:
        """Return True iff swapping exactly one pair of letters in A yields B."""
        if len(A) != len(B):
            return False
        # Equal strings work only when some letter repeats: swap that pair.
        if A == B:
            return len(set(A)) < len(A)
        # Gather the (A-char, B-char) pairs at every mismatched position.
        diffs = [(a, b) for a, b in zip(A, B) if a != b]
        # Exactly two mismatches whose characters mirror each other.
        return len(diffs) == 2 and diffs[0] == diffs[1][::-1]
|
normal
|
{
"blob_id": "dd902f99ee8dc23f56641b8e75544a2d4576c19a",
"index": 4437,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def buddyStrings(self, A: str, B: str) ->bool:\n if len(A) != len(B):\n return False\n if A == B and len(A) > len(set(A)):\n return True\n re1 = ''\n re2 = ''\n for i in range(len(A)):\n if A[i] != B[i]:\n re1 += A[i]\n re2 += B[i]\n if len(re1) == len(re2) == 2 and re1 == re2[::-1]:\n return True\n return False\n",
"step-4": "\"\"\"\nGiven two strings A and B of lowercase letters, return true \nif and only if we can swap two letters in A so that the result \nequals B.\n\n Example 1:\n\n Input: A = \"ab\", B = \"ba\"\n Output: true\n\"\"\"\n\nclass Solution:\n def buddyStrings(self, A: str, B: str) -> bool:\n if len(A) != len(B):\n return False\n \n if A == B and len(A) > len(set(A)):\n return True\n \n re1 = \"\"\n re2 = \"\"\n for i in range(len(A)):\n if A[i] != B[i]:\n re1 += A[i]\n re2 += B[i] \n \n if len(re1) == len(re2) == 2 and re1 == re2[::-1]: \n return True\n \n return False\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):
    """Fetch one BOSS Zhipin job-detail page and save the raw HTML to disk.

    :param jid: job id embedded in the detail-page URL
    :param ka: tracking query parameter sent with the request
    :param i: sequence number used only for log output
    """
    job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'
    # Spoof a desktop-browser user agent so the request looks like a person.
    headers = {'cache-control': 'no-cache', 'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'
        }
    querystring = {'ka': ka}
    try:
        r = requests.request('GET', job_url, headers=headers, params=
            querystring)
        content = r.content.decode('utf-8')
        # Persist the raw page so parsing can happen offline later.
        file = './raw_data/page/' + jid + '.html'
        with open(file, 'w', encoding='utf-8') as f:
            f.write(content)
        result = 'suceed'  # NOTE(review): typo for "succeed" in the log text
    except IOError:
        # NOTE(review): network failures from requests are not IOError and
        # would propagate out of this function — confirm that is intended.
        result = 'failed'
    log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result
    print(log)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):
    """Fetch one BOSS Zhipin job-detail page and save the raw HTML to disk.

    :param jid: job id embedded in the detail-page URL
    :param ka: tracking query parameter sent with the request
    :param i: sequence number used only for log output
    """
    job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'
    # Spoof a desktop-browser user agent so the request looks like a person.
    headers = {'cache-control': 'no-cache', 'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'
        }
    querystring = {'ka': ka}
    try:
        r = requests.request('GET', job_url, headers=headers, params=
            querystring)
        content = r.content.decode('utf-8')
        # Persist the raw page so parsing can happen offline later.
        file = './raw_data/page/' + jid + '.html'
        with open(file, 'w', encoding='utf-8') as f:
            f.write(content)
        result = 'suceed'  # NOTE(review): typo for "succeed" in the log text
    except IOError:
        # NOTE(review): network failures from requests are not IOError and
        # would propagate out of this function — confirm that is intended.
        result = 'failed'
    log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result
    print(log)
if __name__ == '__main__':
    # Job ids (column 0) and their `ka` tracking values (column 1) come
    # from a pre-built CSV list with no header row.
    file = './raw_data/list/job_list.csv'
    df = pd.read_csv(file, encoding='utf-8', header=None)
    jid_list = df[0].values.tolist()
    ka_list = df[1].values.tolist()
    for i in range(0, len(jid_list)):
        job_spider(jid_list[i], ka_list[i], i)
        # Throttle to roughly one request per second.
        time.sleep(1)
<|reserved_special_token_1|>
import requests
import pandas as pd
import time
def job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):
    """Fetch one BOSS Zhipin job-detail page and save the raw HTML to disk.

    :param jid: job id embedded in the detail-page URL
    :param ka: tracking query parameter sent with the request
    :param i: sequence number used only for log output
    """
    job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'
    # Spoof a desktop-browser user agent so the request looks like a person.
    headers = {'cache-control': 'no-cache', 'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'
        }
    querystring = {'ka': ka}
    try:
        r = requests.request('GET', job_url, headers=headers, params=
            querystring)
        content = r.content.decode('utf-8')
        # Persist the raw page so parsing can happen offline later.
        file = './raw_data/page/' + jid + '.html'
        with open(file, 'w', encoding='utf-8') as f:
            f.write(content)
        result = 'suceed'  # NOTE(review): typo for "succeed" in the log text
    except IOError:
        # NOTE(review): network failures from requests are not IOError and
        # would propagate out of this function — confirm that is intended.
        result = 'failed'
    log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result
    print(log)
if __name__ == '__main__':
    # Job ids (column 0) and their `ka` tracking values (column 1) come
    # from a pre-built CSV list with no header row.
    file = './raw_data/list/job_list.csv'
    df = pd.read_csv(file, encoding='utf-8', header=None)
    jid_list = df[0].values.tolist()
    ka_list = df[1].values.tolist()
    for i in range(0, len(jid_list)):
        job_spider(jid_list[i], ka_list[i], i)
        # Throttle to roughly one request per second.
        time.sleep(1)
<|reserved_special_token_1|>
import requests
import pandas as pd
import time
def job_spider(jid="1913e38066dd3c8e1Hd40t--FVE~", ka="search_list_1", i=0):
    """Fetch one BOSS Zhipin job-detail page and save the raw HTML to disk.

    :param jid: job id embedded in the detail-page URL
    :param ka: tracking query parameter sent with the request
    :param i: sequence number used only for log output
    """
    # request info.
    job_url = "https://www.zhipin.com/job_detail/" + jid + ".html"

    # Spoof a desktop-browser user agent so the request looks like a person.
    headers = {
        'cache-control': "no-cache",
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/71.0.3578.80 Safari/537.36'
    }

    querystring = {"ka": ka}

    try:
        # request
        r = requests.request("GET", job_url, headers=headers,
                             params=querystring)
        content = r.content.decode('utf-8')

        # Persist the raw page so parsing can happen offline later.
        file = "./raw_data/page/" + jid + ".html"
        with open(file, 'w', encoding='utf-8') as f:
            f.write(content)
        result = "succeed"  # fixed typo: was "suceed"
    except Exception:
        # Broadened from IOError: requests raises RequestException (not an
        # IOError) on network failures, which previously escaped this
        # handler and aborted the whole crawl loop. For a best-effort
        # crawler, log the failure and move on to the next job.
        result = "failed"

    log = "job " + str(i) + " : " + jid + " crawl " + result
    print(log)
if __name__ == "__main__":

    # Job ids (column 0) and their `ka` tracking values (column 1) come
    # from a pre-built CSV list with no header row.
    file = "./raw_data/list/job_list.csv"
    df = pd.read_csv(file, encoding='utf-8', header=None)

    jid_list = df[0].values.tolist()
    ka_list = df[1].values.tolist()
    # print(jid_list)

    # Crawl each job page, throttled to roughly one request per second.
    for i in range(0, len(jid_list)):
        job_spider(jid_list[i], ka_list[i], i)
        time.sleep(1)
|
flexible
|
{
"blob_id": "5b894eac93bff44931df4ef8d845c23071a03227",
"index": 3461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\nif __name__ == '__main__':\n file = './raw_data/list/job_list.csv'\n df = pd.read_csv(file, encoding='utf-8', header=None)\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-4": "import requests\nimport pandas as pd\nimport time\n\n\ndef job_spider(jid='1913e38066dd3c8e1Hd40t--FVE~', ka='search_list_1', i=0):\n job_url = 'https://www.zhipin.com/job_detail/' + jid + '.html'\n headers = {'cache-control': 'no-cache', 'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'\n }\n querystring = {'ka': ka}\n try:\n r = requests.request('GET', job_url, headers=headers, params=\n querystring)\n content = r.content.decode('utf-8')\n file = './raw_data/page/' + jid + '.html'\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = 'suceed'\n except IOError:\n result = 'failed'\n log = 'job ' + str(i) + ' : ' + jid + ' crawl ' + result\n print(log)\n\n\nif __name__ == '__main__':\n file = './raw_data/list/job_list.csv'\n df = pd.read_csv(file, encoding='utf-8', header=None)\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-5": "import requests\nimport pandas as pd\nimport time\n\n\ndef job_spider(jid=\"1913e38066dd3c8e1Hd40t--FVE~\", ka=\"search_list_1\", i=0):\n # request info.\n job_url = \"https://www.zhipin.com/job_detail/\" + jid + \".html\"\n\n headers = {\n 'cache-control': \"no-cache\",\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.80 Safari/537.36'\n }\n\n querystring = {\"ka\": ka}\n\n try:\n # request\n r = requests.request(\"GET\", job_url, headers=headers, params=querystring)\n content = r.content.decode('utf-8')\n\n # raw data.\n file = \"./raw_data/page/\" + jid + \".html\"\n with open(file, 'w', encoding='utf-8') as f:\n f.write(content)\n result = \"suceed\"\n except IOError:\n result = \"failed\"\n\n log = \"job \" + str(i) + \" : \" + jid + \" crawl \" + result\n print(log)\n\n\nif __name__ == \"__main__\":\n\n file = \"./raw_data/list/job_list.csv\"\n df = pd.read_csv(file, encoding='utf-8', header=None)\n\n jid_list = df[0].values.tolist()\n ka_list = df[1].values.tolist()\n # print(jid_list)\n\n for i in range(0, len(jid_list)):\n job_spider(jid_list[i], ka_list[i], i)\n time.sleep(1)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def swap(arr, start, end):
while start < end:
arr[start], arr[end] = arr[end], arr[start]
start += 1
end -= 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def swap(arr, start, end):
while start < end:
arr[start], arr[end] = arr[end], arr[start]
start += 1
end -= 1
def rotation(arr, k, n):
k = k % n
swap(arr, 0, k - 1)
print(arr)
swap(arr, k, n - 1)
print(arr)
swap(arr, 0, n - 1)
print(arr)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def swap(arr, start, end):
while start < end:
arr[start], arr[end] = arr[end], arr[start]
start += 1
end -= 1
def rotation(arr, k, n):
k = k % n
swap(arr, 0, k - 1)
print(arr)
swap(arr, k, n - 1)
print(arr)
swap(arr, 0, n - 1)
print(arr)
if __name__ == '__main__':
arr = [1, 2, 3, 4, 5, 6, 7]
n = len(arr)
k = 4
rotation(arr, k, n)
<|reserved_special_token_1|>
# Time :O(N) space: O(1)
def swap(arr, start, end):
while start < end:
arr[start], arr[end] = arr[end], arr[start]
start += 1
end -= 1
def rotation(arr, k, n):
k = k % n
swap(arr, 0, k-1)
print(arr)
swap(arr, k, n-1)
print(arr)
swap(arr, 0, n-1)
print(arr)
if __name__ == '__main__':
arr = [1, 2, 3, 4, 5, 6, 7]
n = len(arr)
k = 4
rotation(arr, k, n)
|
flexible
|
{
"blob_id": "2180146da7ea745f5917ee66fd8c467437b5af4c",
"index": 6761,
"step-1": "<mask token>\n",
"step-2": "def swap(arr, start, end):\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\n\n<mask token>\n",
"step-3": "def swap(arr, start, end):\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\n\ndef rotation(arr, k, n):\n k = k % n\n swap(arr, 0, k - 1)\n print(arr)\n swap(arr, k, n - 1)\n print(arr)\n swap(arr, 0, n - 1)\n print(arr)\n\n\n<mask token>\n",
"step-4": "def swap(arr, start, end):\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\n\ndef rotation(arr, k, n):\n k = k % n\n swap(arr, 0, k - 1)\n print(arr)\n swap(arr, k, n - 1)\n print(arr)\n swap(arr, 0, n - 1)\n print(arr)\n\n\nif __name__ == '__main__':\n arr = [1, 2, 3, 4, 5, 6, 7]\n n = len(arr)\n k = 4\n rotation(arr, k, n)\n",
"step-5": "# Time :O(N) space: O(1)\ndef swap(arr, start, end):\n while start < end:\n arr[start], arr[end] = arr[end], arr[start]\n start += 1\n end -= 1\n\ndef rotation(arr, k, n):\n k = k % n\n swap(arr, 0, k-1)\n print(arr)\n swap(arr, k, n-1)\n print(arr)\n swap(arr, 0, n-1)\n print(arr)\n\nif __name__ == '__main__':\n arr = [1, 2, 3, 4, 5, 6, 7]\n n = len(arr)\n k = 4\n rotation(arr, k, n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from odoo import fields, models
class LunchWizard(models.TransientModel):
_name = "lunch.wizard"
_description = "LunchWizard"
lun_type = fields.Char(string="Set New Lunch Type")
lunch_id = fields.Many2one('lunch.lunch', string="Lunch Id")
def action_process_lunch(self):
self.lunch_id.lunch_type = self.lun_type
#self.write( { self.lunch_id.lunch_type : self.lun_type } )
|
normal
|
{
"blob_id": "85e5bf57f7eba2cbee0fbb8a4d37b5180208f9b7",
"index": 3830,
"step-1": "<mask token>\n\n\nclass LunchWizard(models.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LunchWizard(models.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def action_process_lunch(self):\n self.lunch_id.lunch_type = self.lun_type\n",
"step-3": "<mask token>\n\n\nclass LunchWizard(models.TransientModel):\n _name = 'lunch.wizard'\n _description = 'LunchWizard'\n lun_type = fields.Char(string='Set New Lunch Type')\n lunch_id = fields.Many2one('lunch.lunch', string='Lunch Id')\n\n def action_process_lunch(self):\n self.lunch_id.lunch_type = self.lun_type\n",
"step-4": "from odoo import fields, models\n\n\nclass LunchWizard(models.TransientModel):\n _name = 'lunch.wizard'\n _description = 'LunchWizard'\n lun_type = fields.Char(string='Set New Lunch Type')\n lunch_id = fields.Many2one('lunch.lunch', string='Lunch Id')\n\n def action_process_lunch(self):\n self.lunch_id.lunch_type = self.lun_type\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\nfrom odoo import fields, models\n\n\nclass LunchWizard(models.TransientModel):\n _name = \"lunch.wizard\"\n _description = \"LunchWizard\"\n\n lun_type = fields.Char(string=\"Set New Lunch Type\")\n lunch_id = fields.Many2one('lunch.lunch', string=\"Lunch Id\")\n\n def action_process_lunch(self):\n self.lunch_id.lunch_type = self.lun_type\n #self.write( { self.lunch_id.lunch_type : self.lun_type } )",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class DotProductTest(simtest):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _check(self, c, d):
d = numpy.array(d)
ds = d.copy()
ds[d > 511] = d[d > 511] - 1024
c = numpy.array(c)
cq = self._set_coeff(c)
cs = cq.astype(numpy.int8)
cs[cq > 127] = cq[cq > 127] - 256
self._set_data(d)
do = self.dev.get('DotProductTest', 'datao_uu')
data_uu = min(1023, sum(cq * d) / 32)
self.assertTrue(do == data_uu, 'UU')
data_su = max(0, min(1023, sum(cs * d) / 32))
do = self.dev.get('DotProductTest', 'datao_su')
self.assertTrue(do == data_su, 'SU ' + str(d) + ' ' + str(cs) + ' ' +
str(do) + ' ' + str(data_su))
data_us = max(-512, min(511, sum(cq * ds) / 32))
do = self.dev.get('DotProductTest', 'datao_us')
if do > 511:
do = do - 1024
self.assertTrue(do == data_us, 'US')
data_ss = max(-512, min(511, sum(cs * ds) / 32))
do = self.dev.get('DotProductTest', 'datao_ss')
if do > 511:
do = do - 1024
self.assertTrue(do == data_ss, 'SS')
def testDotProduct(self):
"""Setup up Dot Product with various input and test output matches expectation."""
self._check([0.0, 1.0, 0.0], [1023, 1023, 1023])
self._check([0.125, 0.75, 0.125], [1023, 1023, 1023])
self._check([1 / 32.0, 1.0, 0.0], [1023, 1023, 1023])
self._check([1.0, 1.0, 1.0], [1023, 1023, 1023])
self._check([0, 0, 0], [1023, 1023, 1023])
self._check([1 / 32.0, 0, 0], [1, 100, 100])
self._check([1.0, 0, 0], [1, 100, 100])
self._check([0, 1.0, 0], [1, 100, 100])
self._check([0, 0, 1.0], [1, 100, 100])
self._check([1.0, 1.0, 1.0], [513, 513, 513])
self._check([1.0, 1.0, 1.0], [512, 512, 512])
self._check([1.0, 1.0, 1.0], [0, 512, 0])
self._check([0.0, 1.5, 0.0], [0, 680, 0])
self._check([0.0, 1.5, 0.0], [0, 681, 0])
self._check([0.0, 1.5, 0.0], [0, 682, 0])
self._check([0.0, 1.5, 0.0], [0, 683, 0])
self._check([0.0, 1.5, 0.0], [0, 339, 0])
self._check([0.0, 1.5, 0.0], [0, 340, 0])
self._check([0.0, 1.5, 0.0], [0, 341, 0])
self._check([0.0, 1.5, 0.0], [0, 342, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 338, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 339, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 340, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 341, 0])
self._check([0.0, -1.0, 0.0], [0, 500, 0])
self._check([1 / 32.0, -1.0, 1 / 32.0], [500, 500, 500])
self._check([-1 / 32.0, -1.0, -1 / 32.0], [400, 400, 400])
for idx in range(100):
data = [random.randint(0, 1023) for r in range(3)]
coeff = [max(-2.0, min(127 / 32.0, random.random() * 4 - 2)) for
r in range(3)]
self._check(coeff, data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DotProductTest(simtest):
def _set_coeff(self, c):
cq = (c * 32).astype(numpy.uint8)
self.dev.set('DotProductTest', 'c0', cq[0])
self.dev.set('DotProductTest', 'c1', cq[1])
self.dev.set('DotProductTest', 'c2', cq[2])
return cq
<|reserved_special_token_0|>
def _check(self, c, d):
d = numpy.array(d)
ds = d.copy()
ds[d > 511] = d[d > 511] - 1024
c = numpy.array(c)
cq = self._set_coeff(c)
cs = cq.astype(numpy.int8)
cs[cq > 127] = cq[cq > 127] - 256
self._set_data(d)
do = self.dev.get('DotProductTest', 'datao_uu')
data_uu = min(1023, sum(cq * d) / 32)
self.assertTrue(do == data_uu, 'UU')
data_su = max(0, min(1023, sum(cs * d) / 32))
do = self.dev.get('DotProductTest', 'datao_su')
self.assertTrue(do == data_su, 'SU ' + str(d) + ' ' + str(cs) + ' ' +
str(do) + ' ' + str(data_su))
data_us = max(-512, min(511, sum(cq * ds) / 32))
do = self.dev.get('DotProductTest', 'datao_us')
if do > 511:
do = do - 1024
self.assertTrue(do == data_us, 'US')
data_ss = max(-512, min(511, sum(cs * ds) / 32))
do = self.dev.get('DotProductTest', 'datao_ss')
if do > 511:
do = do - 1024
self.assertTrue(do == data_ss, 'SS')
def testDotProduct(self):
"""Setup up Dot Product with various input and test output matches expectation."""
self._check([0.0, 1.0, 0.0], [1023, 1023, 1023])
self._check([0.125, 0.75, 0.125], [1023, 1023, 1023])
self._check([1 / 32.0, 1.0, 0.0], [1023, 1023, 1023])
self._check([1.0, 1.0, 1.0], [1023, 1023, 1023])
self._check([0, 0, 0], [1023, 1023, 1023])
self._check([1 / 32.0, 0, 0], [1, 100, 100])
self._check([1.0, 0, 0], [1, 100, 100])
self._check([0, 1.0, 0], [1, 100, 100])
self._check([0, 0, 1.0], [1, 100, 100])
self._check([1.0, 1.0, 1.0], [513, 513, 513])
self._check([1.0, 1.0, 1.0], [512, 512, 512])
self._check([1.0, 1.0, 1.0], [0, 512, 0])
self._check([0.0, 1.5, 0.0], [0, 680, 0])
self._check([0.0, 1.5, 0.0], [0, 681, 0])
self._check([0.0, 1.5, 0.0], [0, 682, 0])
self._check([0.0, 1.5, 0.0], [0, 683, 0])
self._check([0.0, 1.5, 0.0], [0, 339, 0])
self._check([0.0, 1.5, 0.0], [0, 340, 0])
self._check([0.0, 1.5, 0.0], [0, 341, 0])
self._check([0.0, 1.5, 0.0], [0, 342, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 338, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 339, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 340, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 341, 0])
self._check([0.0, -1.0, 0.0], [0, 500, 0])
self._check([1 / 32.0, -1.0, 1 / 32.0], [500, 500, 500])
self._check([-1 / 32.0, -1.0, -1 / 32.0], [400, 400, 400])
for idx in range(100):
data = [random.randint(0, 1023) for r in range(3)]
coeff = [max(-2.0, min(127 / 32.0, random.random() * 4 - 2)) for
r in range(3)]
self._check(coeff, data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DotProductTest(simtest):
def _set_coeff(self, c):
cq = (c * 32).astype(numpy.uint8)
self.dev.set('DotProductTest', 'c0', cq[0])
self.dev.set('DotProductTest', 'c1', cq[1])
self.dev.set('DotProductTest', 'c2', cq[2])
return cq
def _set_data(self, d):
self.dev.set('DotProductTest', 'd0', d[0])
self.dev.set('DotProductTest', 'd1', d[1])
self.dev.set('DotProductTest', 'd2', d[2])
def _check(self, c, d):
d = numpy.array(d)
ds = d.copy()
ds[d > 511] = d[d > 511] - 1024
c = numpy.array(c)
cq = self._set_coeff(c)
cs = cq.astype(numpy.int8)
cs[cq > 127] = cq[cq > 127] - 256
self._set_data(d)
do = self.dev.get('DotProductTest', 'datao_uu')
data_uu = min(1023, sum(cq * d) / 32)
self.assertTrue(do == data_uu, 'UU')
data_su = max(0, min(1023, sum(cs * d) / 32))
do = self.dev.get('DotProductTest', 'datao_su')
self.assertTrue(do == data_su, 'SU ' + str(d) + ' ' + str(cs) + ' ' +
str(do) + ' ' + str(data_su))
data_us = max(-512, min(511, sum(cq * ds) / 32))
do = self.dev.get('DotProductTest', 'datao_us')
if do > 511:
do = do - 1024
self.assertTrue(do == data_us, 'US')
data_ss = max(-512, min(511, sum(cs * ds) / 32))
do = self.dev.get('DotProductTest', 'datao_ss')
if do > 511:
do = do - 1024
self.assertTrue(do == data_ss, 'SS')
def testDotProduct(self):
"""Setup up Dot Product with various input and test output matches expectation."""
self._check([0.0, 1.0, 0.0], [1023, 1023, 1023])
self._check([0.125, 0.75, 0.125], [1023, 1023, 1023])
self._check([1 / 32.0, 1.0, 0.0], [1023, 1023, 1023])
self._check([1.0, 1.0, 1.0], [1023, 1023, 1023])
self._check([0, 0, 0], [1023, 1023, 1023])
self._check([1 / 32.0, 0, 0], [1, 100, 100])
self._check([1.0, 0, 0], [1, 100, 100])
self._check([0, 1.0, 0], [1, 100, 100])
self._check([0, 0, 1.0], [1, 100, 100])
self._check([1.0, 1.0, 1.0], [513, 513, 513])
self._check([1.0, 1.0, 1.0], [512, 512, 512])
self._check([1.0, 1.0, 1.0], [0, 512, 0])
self._check([0.0, 1.5, 0.0], [0, 680, 0])
self._check([0.0, 1.5, 0.0], [0, 681, 0])
self._check([0.0, 1.5, 0.0], [0, 682, 0])
self._check([0.0, 1.5, 0.0], [0, 683, 0])
self._check([0.0, 1.5, 0.0], [0, 339, 0])
self._check([0.0, 1.5, 0.0], [0, 340, 0])
self._check([0.0, 1.5, 0.0], [0, 341, 0])
self._check([0.0, 1.5, 0.0], [0, 342, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 338, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 339, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 340, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 341, 0])
self._check([0.0, -1.0, 0.0], [0, 500, 0])
self._check([1 / 32.0, -1.0, 1 / 32.0], [500, 500, 500])
self._check([-1 / 32.0, -1.0, -1 / 32.0], [400, 400, 400])
for idx in range(100):
data = [random.randint(0, 1023) for r in range(3)]
coeff = [max(-2.0, min(127 / 32.0, random.random() * 4 - 2)) for
r in range(3)]
self._check(coeff, data)
<|reserved_special_token_1|>
from basetest import simtest
import testutil
import logging, random
from nitro_parts.lib.imager import ccm as CCM
import numpy
class DotProductTest(simtest):
def _set_coeff(self, c):
cq = (c * 32).astype(numpy.uint8)
self.dev.set('DotProductTest', 'c0', cq[0])
self.dev.set('DotProductTest', 'c1', cq[1])
self.dev.set('DotProductTest', 'c2', cq[2])
return cq
def _set_data(self, d):
self.dev.set('DotProductTest', 'd0', d[0])
self.dev.set('DotProductTest', 'd1', d[1])
self.dev.set('DotProductTest', 'd2', d[2])
def _check(self, c, d):
d = numpy.array(d)
ds = d.copy()
ds[d > 511] = d[d > 511] - 1024
c = numpy.array(c)
cq = self._set_coeff(c)
cs = cq.astype(numpy.int8)
cs[cq > 127] = cq[cq > 127] - 256
self._set_data(d)
do = self.dev.get('DotProductTest', 'datao_uu')
data_uu = min(1023, sum(cq * d) / 32)
self.assertTrue(do == data_uu, 'UU')
data_su = max(0, min(1023, sum(cs * d) / 32))
do = self.dev.get('DotProductTest', 'datao_su')
self.assertTrue(do == data_su, 'SU ' + str(d) + ' ' + str(cs) + ' ' +
str(do) + ' ' + str(data_su))
data_us = max(-512, min(511, sum(cq * ds) / 32))
do = self.dev.get('DotProductTest', 'datao_us')
if do > 511:
do = do - 1024
self.assertTrue(do == data_us, 'US')
data_ss = max(-512, min(511, sum(cs * ds) / 32))
do = self.dev.get('DotProductTest', 'datao_ss')
if do > 511:
do = do - 1024
self.assertTrue(do == data_ss, 'SS')
def testDotProduct(self):
"""Setup up Dot Product with various input and test output matches expectation."""
self._check([0.0, 1.0, 0.0], [1023, 1023, 1023])
self._check([0.125, 0.75, 0.125], [1023, 1023, 1023])
self._check([1 / 32.0, 1.0, 0.0], [1023, 1023, 1023])
self._check([1.0, 1.0, 1.0], [1023, 1023, 1023])
self._check([0, 0, 0], [1023, 1023, 1023])
self._check([1 / 32.0, 0, 0], [1, 100, 100])
self._check([1.0, 0, 0], [1, 100, 100])
self._check([0, 1.0, 0], [1, 100, 100])
self._check([0, 0, 1.0], [1, 100, 100])
self._check([1.0, 1.0, 1.0], [513, 513, 513])
self._check([1.0, 1.0, 1.0], [512, 512, 512])
self._check([1.0, 1.0, 1.0], [0, 512, 0])
self._check([0.0, 1.5, 0.0], [0, 680, 0])
self._check([0.0, 1.5, 0.0], [0, 681, 0])
self._check([0.0, 1.5, 0.0], [0, 682, 0])
self._check([0.0, 1.5, 0.0], [0, 683, 0])
self._check([0.0, 1.5, 0.0], [0, 339, 0])
self._check([0.0, 1.5, 0.0], [0, 340, 0])
self._check([0.0, 1.5, 0.0], [0, 341, 0])
self._check([0.0, 1.5, 0.0], [0, 342, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 338, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 339, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 340, 0])
self._check([0.0, 1.5, 0.0], [0, 1023 - 341, 0])
self._check([0.0, -1.0, 0.0], [0, 500, 0])
self._check([1 / 32.0, -1.0, 1 / 32.0], [500, 500, 500])
self._check([-1 / 32.0, -1.0, -1 / 32.0], [400, 400, 400])
for idx in range(100):
data = [random.randint(0, 1023) for r in range(3)]
coeff = [max(-2.0, min(127 / 32.0, random.random() * 4 - 2)) for
r in range(3)]
self._check(coeff, data)
<|reserved_special_token_1|>
from basetest import simtest
import testutil
import logging, random
from nitro_parts.lib.imager import ccm as CCM
import numpy
###############################################################################
class DotProductTest(simtest):
def _set_coeff(self, c):
cq = (c * 32).astype(numpy.uint8)
self.dev.set("DotProductTest","c0", cq[0])
self.dev.set("DotProductTest","c1", cq[1])
self.dev.set("DotProductTest","c2", cq[2])
return cq
def _set_data(self, d):
self.dev.set("DotProductTest","d0", d[0])
self.dev.set("DotProductTest","d1", d[1])
self.dev.set("DotProductTest","d2", d[2])
def _check(self, c, d):
d = numpy.array(d)
ds = d.copy()
ds[d>511] = d[d>511] - 1024
c = numpy.array(c)
cq = self._set_coeff(c)
cs = cq.astype(numpy.int8)
cs[cq>127] = cq[cq>127] - 256
self._set_data(d)
do = self.dev.get("DotProductTest","datao_uu")
data_uu = min(1023, sum(cq * d)/32)
#print "UU", do, data_uu
self.assertTrue( do == data_uu, "UU")
data_su = max(0, min(1023, sum(cs * d)/32))
do = self.dev.get("DotProductTest", "datao_su")
#print "SU", d, cs, do, data_su
self.assertTrue( do == data_su, "SU " + str(d) + " " + str(cs) + " " + str(do) + " " + str(data_su))
data_us = max(-512, min(511, sum(cq * ds)/32))
do = self.dev.get("DotProductTest", "datao_us")
if(do > 511): do = do - 1024
#print "US", do, ds, data_us
self.assertTrue( do == data_us, "US")
data_ss = max(-512, min(511, sum(cs * ds)/32))
do = self.dev.get("DotProductTest", "datao_ss")
if(do > 511): do = do - 1024
#print "SS", ds, cq, cs, do, data_ss
self.assertTrue( do == data_ss, "SS")
def testDotProduct(self):
"""Setup up Dot Product with various input and test output matches expectation."""
self._check([0.000, 1.000, 0.000], [ 1023, 1023, 1023 ])
self._check([0.125, 0.750, 0.125], [ 1023, 1023, 1023 ])
self._check([1/32., 1.000, 0.000], [ 1023, 1023, 1023 ])
self._check([1.000, 1.000, 1.000], [ 1023, 1023, 1023 ])
self._check([0, 0, 0], [ 1023, 1023, 1023 ])
self._check([1/32., 0, 0], [ 1, 100, 100 ])
self._check([1.0, 0, 0], [ 1, 100, 100 ])
self._check([0, 1.0, 0], [ 1, 100, 100 ])
self._check([0, 0, 1.0], [ 1, 100, 100 ])
self._check([1.000, 1.000, 1.000], [ 513, 513, 513 ])
self._check([1.000, 1.000, 1.000], [ 512, 512, 512 ])
self._check([1.000, 1.000, 1.000], [ 0, 512, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 680, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 681, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 682, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 683, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 339, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 340, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 341, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 342, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 1023-338, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 1023-339, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 1023-340, 0 ])
self._check([0.000, 1.5, 0.000], [ 0, 1023-341, 0 ])
self._check([0.000, -1.0, 0.000], [ 0, 500, 0 ])
self._check([1/32., -1.0, 1/32.], [ 500, 500, 500 ])
self._check([-1/32., -1.0, -1/32.], [ 400, 400, 400 ])
for idx in range(100):
data = [ random.randint(0,1023) for r in range(3) ]
coeff = [ max(-2.0, min(127/32., random.random() * 4 - 2)) for r in range(3) ]
#print coeff, data
self._check(coeff, data)
|
flexible
|
{
"blob_id": "53110d6e7923cf65c514d54950a0be165582e9a0",
"index": 4909,
"step-1": "<mask token>\n\n\nclass DotProductTest(simtest):\n <mask token>\n <mask token>\n\n def _check(self, c, d):\n d = numpy.array(d)\n ds = d.copy()\n ds[d > 511] = d[d > 511] - 1024\n c = numpy.array(c)\n cq = self._set_coeff(c)\n cs = cq.astype(numpy.int8)\n cs[cq > 127] = cq[cq > 127] - 256\n self._set_data(d)\n do = self.dev.get('DotProductTest', 'datao_uu')\n data_uu = min(1023, sum(cq * d) / 32)\n self.assertTrue(do == data_uu, 'UU')\n data_su = max(0, min(1023, sum(cs * d) / 32))\n do = self.dev.get('DotProductTest', 'datao_su')\n self.assertTrue(do == data_su, 'SU ' + str(d) + ' ' + str(cs) + ' ' +\n str(do) + ' ' + str(data_su))\n data_us = max(-512, min(511, sum(cq * ds) / 32))\n do = self.dev.get('DotProductTest', 'datao_us')\n if do > 511:\n do = do - 1024\n self.assertTrue(do == data_us, 'US')\n data_ss = max(-512, min(511, sum(cs * ds) / 32))\n do = self.dev.get('DotProductTest', 'datao_ss')\n if do > 511:\n do = do - 1024\n self.assertTrue(do == data_ss, 'SS')\n\n def testDotProduct(self):\n \"\"\"Setup up Dot Product with various input and test output matches expectation.\"\"\"\n self._check([0.0, 1.0, 0.0], [1023, 1023, 1023])\n self._check([0.125, 0.75, 0.125], [1023, 1023, 1023])\n self._check([1 / 32.0, 1.0, 0.0], [1023, 1023, 1023])\n self._check([1.0, 1.0, 1.0], [1023, 1023, 1023])\n self._check([0, 0, 0], [1023, 1023, 1023])\n self._check([1 / 32.0, 0, 0], [1, 100, 100])\n self._check([1.0, 0, 0], [1, 100, 100])\n self._check([0, 1.0, 0], [1, 100, 100])\n self._check([0, 0, 1.0], [1, 100, 100])\n self._check([1.0, 1.0, 1.0], [513, 513, 513])\n self._check([1.0, 1.0, 1.0], [512, 512, 512])\n self._check([1.0, 1.0, 1.0], [0, 512, 0])\n self._check([0.0, 1.5, 0.0], [0, 680, 0])\n self._check([0.0, 1.5, 0.0], [0, 681, 0])\n self._check([0.0, 1.5, 0.0], [0, 682, 0])\n self._check([0.0, 1.5, 0.0], [0, 683, 0])\n self._check([0.0, 1.5, 0.0], [0, 339, 0])\n self._check([0.0, 1.5, 0.0], [0, 340, 0])\n self._check([0.0, 1.5, 0.0], [0, 341, 0])\n 
self._check([0.0, 1.5, 0.0], [0, 342, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 338, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 339, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 340, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 341, 0])\n self._check([0.0, -1.0, 0.0], [0, 500, 0])\n self._check([1 / 32.0, -1.0, 1 / 32.0], [500, 500, 500])\n self._check([-1 / 32.0, -1.0, -1 / 32.0], [400, 400, 400])\n for idx in range(100):\n data = [random.randint(0, 1023) for r in range(3)]\n coeff = [max(-2.0, min(127 / 32.0, random.random() * 4 - 2)) for\n r in range(3)]\n self._check(coeff, data)\n",
"step-2": "<mask token>\n\n\nclass DotProductTest(simtest):\n\n def _set_coeff(self, c):\n cq = (c * 32).astype(numpy.uint8)\n self.dev.set('DotProductTest', 'c0', cq[0])\n self.dev.set('DotProductTest', 'c1', cq[1])\n self.dev.set('DotProductTest', 'c2', cq[2])\n return cq\n <mask token>\n\n def _check(self, c, d):\n d = numpy.array(d)\n ds = d.copy()\n ds[d > 511] = d[d > 511] - 1024\n c = numpy.array(c)\n cq = self._set_coeff(c)\n cs = cq.astype(numpy.int8)\n cs[cq > 127] = cq[cq > 127] - 256\n self._set_data(d)\n do = self.dev.get('DotProductTest', 'datao_uu')\n data_uu = min(1023, sum(cq * d) / 32)\n self.assertTrue(do == data_uu, 'UU')\n data_su = max(0, min(1023, sum(cs * d) / 32))\n do = self.dev.get('DotProductTest', 'datao_su')\n self.assertTrue(do == data_su, 'SU ' + str(d) + ' ' + str(cs) + ' ' +\n str(do) + ' ' + str(data_su))\n data_us = max(-512, min(511, sum(cq * ds) / 32))\n do = self.dev.get('DotProductTest', 'datao_us')\n if do > 511:\n do = do - 1024\n self.assertTrue(do == data_us, 'US')\n data_ss = max(-512, min(511, sum(cs * ds) / 32))\n do = self.dev.get('DotProductTest', 'datao_ss')\n if do > 511:\n do = do - 1024\n self.assertTrue(do == data_ss, 'SS')\n\n def testDotProduct(self):\n \"\"\"Setup up Dot Product with various input and test output matches expectation.\"\"\"\n self._check([0.0, 1.0, 0.0], [1023, 1023, 1023])\n self._check([0.125, 0.75, 0.125], [1023, 1023, 1023])\n self._check([1 / 32.0, 1.0, 0.0], [1023, 1023, 1023])\n self._check([1.0, 1.0, 1.0], [1023, 1023, 1023])\n self._check([0, 0, 0], [1023, 1023, 1023])\n self._check([1 / 32.0, 0, 0], [1, 100, 100])\n self._check([1.0, 0, 0], [1, 100, 100])\n self._check([0, 1.0, 0], [1, 100, 100])\n self._check([0, 0, 1.0], [1, 100, 100])\n self._check([1.0, 1.0, 1.0], [513, 513, 513])\n self._check([1.0, 1.0, 1.0], [512, 512, 512])\n self._check([1.0, 1.0, 1.0], [0, 512, 0])\n self._check([0.0, 1.5, 0.0], [0, 680, 0])\n self._check([0.0, 1.5, 0.0], [0, 681, 0])\n self._check([0.0, 
1.5, 0.0], [0, 682, 0])\n self._check([0.0, 1.5, 0.0], [0, 683, 0])\n self._check([0.0, 1.5, 0.0], [0, 339, 0])\n self._check([0.0, 1.5, 0.0], [0, 340, 0])\n self._check([0.0, 1.5, 0.0], [0, 341, 0])\n self._check([0.0, 1.5, 0.0], [0, 342, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 338, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 339, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 340, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 341, 0])\n self._check([0.0, -1.0, 0.0], [0, 500, 0])\n self._check([1 / 32.0, -1.0, 1 / 32.0], [500, 500, 500])\n self._check([-1 / 32.0, -1.0, -1 / 32.0], [400, 400, 400])\n for idx in range(100):\n data = [random.randint(0, 1023) for r in range(3)]\n coeff = [max(-2.0, min(127 / 32.0, random.random() * 4 - 2)) for\n r in range(3)]\n self._check(coeff, data)\n",
"step-3": "<mask token>\n\n\nclass DotProductTest(simtest):\n\n def _set_coeff(self, c):\n cq = (c * 32).astype(numpy.uint8)\n self.dev.set('DotProductTest', 'c0', cq[0])\n self.dev.set('DotProductTest', 'c1', cq[1])\n self.dev.set('DotProductTest', 'c2', cq[2])\n return cq\n\n def _set_data(self, d):\n self.dev.set('DotProductTest', 'd0', d[0])\n self.dev.set('DotProductTest', 'd1', d[1])\n self.dev.set('DotProductTest', 'd2', d[2])\n\n def _check(self, c, d):\n d = numpy.array(d)\n ds = d.copy()\n ds[d > 511] = d[d > 511] - 1024\n c = numpy.array(c)\n cq = self._set_coeff(c)\n cs = cq.astype(numpy.int8)\n cs[cq > 127] = cq[cq > 127] - 256\n self._set_data(d)\n do = self.dev.get('DotProductTest', 'datao_uu')\n data_uu = min(1023, sum(cq * d) / 32)\n self.assertTrue(do == data_uu, 'UU')\n data_su = max(0, min(1023, sum(cs * d) / 32))\n do = self.dev.get('DotProductTest', 'datao_su')\n self.assertTrue(do == data_su, 'SU ' + str(d) + ' ' + str(cs) + ' ' +\n str(do) + ' ' + str(data_su))\n data_us = max(-512, min(511, sum(cq * ds) / 32))\n do = self.dev.get('DotProductTest', 'datao_us')\n if do > 511:\n do = do - 1024\n self.assertTrue(do == data_us, 'US')\n data_ss = max(-512, min(511, sum(cs * ds) / 32))\n do = self.dev.get('DotProductTest', 'datao_ss')\n if do > 511:\n do = do - 1024\n self.assertTrue(do == data_ss, 'SS')\n\n def testDotProduct(self):\n \"\"\"Setup up Dot Product with various input and test output matches expectation.\"\"\"\n self._check([0.0, 1.0, 0.0], [1023, 1023, 1023])\n self._check([0.125, 0.75, 0.125], [1023, 1023, 1023])\n self._check([1 / 32.0, 1.0, 0.0], [1023, 1023, 1023])\n self._check([1.0, 1.0, 1.0], [1023, 1023, 1023])\n self._check([0, 0, 0], [1023, 1023, 1023])\n self._check([1 / 32.0, 0, 0], [1, 100, 100])\n self._check([1.0, 0, 0], [1, 100, 100])\n self._check([0, 1.0, 0], [1, 100, 100])\n self._check([0, 0, 1.0], [1, 100, 100])\n self._check([1.0, 1.0, 1.0], [513, 513, 513])\n self._check([1.0, 1.0, 1.0], [512, 512, 512])\n 
self._check([1.0, 1.0, 1.0], [0, 512, 0])\n self._check([0.0, 1.5, 0.0], [0, 680, 0])\n self._check([0.0, 1.5, 0.0], [0, 681, 0])\n self._check([0.0, 1.5, 0.0], [0, 682, 0])\n self._check([0.0, 1.5, 0.0], [0, 683, 0])\n self._check([0.0, 1.5, 0.0], [0, 339, 0])\n self._check([0.0, 1.5, 0.0], [0, 340, 0])\n self._check([0.0, 1.5, 0.0], [0, 341, 0])\n self._check([0.0, 1.5, 0.0], [0, 342, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 338, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 339, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 340, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 341, 0])\n self._check([0.0, -1.0, 0.0], [0, 500, 0])\n self._check([1 / 32.0, -1.0, 1 / 32.0], [500, 500, 500])\n self._check([-1 / 32.0, -1.0, -1 / 32.0], [400, 400, 400])\n for idx in range(100):\n data = [random.randint(0, 1023) for r in range(3)]\n coeff = [max(-2.0, min(127 / 32.0, random.random() * 4 - 2)) for\n r in range(3)]\n self._check(coeff, data)\n",
"step-4": "from basetest import simtest\nimport testutil\nimport logging, random\nfrom nitro_parts.lib.imager import ccm as CCM\nimport numpy\n\n\nclass DotProductTest(simtest):\n\n def _set_coeff(self, c):\n cq = (c * 32).astype(numpy.uint8)\n self.dev.set('DotProductTest', 'c0', cq[0])\n self.dev.set('DotProductTest', 'c1', cq[1])\n self.dev.set('DotProductTest', 'c2', cq[2])\n return cq\n\n def _set_data(self, d):\n self.dev.set('DotProductTest', 'd0', d[0])\n self.dev.set('DotProductTest', 'd1', d[1])\n self.dev.set('DotProductTest', 'd2', d[2])\n\n def _check(self, c, d):\n d = numpy.array(d)\n ds = d.copy()\n ds[d > 511] = d[d > 511] - 1024\n c = numpy.array(c)\n cq = self._set_coeff(c)\n cs = cq.astype(numpy.int8)\n cs[cq > 127] = cq[cq > 127] - 256\n self._set_data(d)\n do = self.dev.get('DotProductTest', 'datao_uu')\n data_uu = min(1023, sum(cq * d) / 32)\n self.assertTrue(do == data_uu, 'UU')\n data_su = max(0, min(1023, sum(cs * d) / 32))\n do = self.dev.get('DotProductTest', 'datao_su')\n self.assertTrue(do == data_su, 'SU ' + str(d) + ' ' + str(cs) + ' ' +\n str(do) + ' ' + str(data_su))\n data_us = max(-512, min(511, sum(cq * ds) / 32))\n do = self.dev.get('DotProductTest', 'datao_us')\n if do > 511:\n do = do - 1024\n self.assertTrue(do == data_us, 'US')\n data_ss = max(-512, min(511, sum(cs * ds) / 32))\n do = self.dev.get('DotProductTest', 'datao_ss')\n if do > 511:\n do = do - 1024\n self.assertTrue(do == data_ss, 'SS')\n\n def testDotProduct(self):\n \"\"\"Setup up Dot Product with various input and test output matches expectation.\"\"\"\n self._check([0.0, 1.0, 0.0], [1023, 1023, 1023])\n self._check([0.125, 0.75, 0.125], [1023, 1023, 1023])\n self._check([1 / 32.0, 1.0, 0.0], [1023, 1023, 1023])\n self._check([1.0, 1.0, 1.0], [1023, 1023, 1023])\n self._check([0, 0, 0], [1023, 1023, 1023])\n self._check([1 / 32.0, 0, 0], [1, 100, 100])\n self._check([1.0, 0, 0], [1, 100, 100])\n self._check([0, 1.0, 0], [1, 100, 100])\n self._check([0, 0, 1.0], 
[1, 100, 100])\n self._check([1.0, 1.0, 1.0], [513, 513, 513])\n self._check([1.0, 1.0, 1.0], [512, 512, 512])\n self._check([1.0, 1.0, 1.0], [0, 512, 0])\n self._check([0.0, 1.5, 0.0], [0, 680, 0])\n self._check([0.0, 1.5, 0.0], [0, 681, 0])\n self._check([0.0, 1.5, 0.0], [0, 682, 0])\n self._check([0.0, 1.5, 0.0], [0, 683, 0])\n self._check([0.0, 1.5, 0.0], [0, 339, 0])\n self._check([0.0, 1.5, 0.0], [0, 340, 0])\n self._check([0.0, 1.5, 0.0], [0, 341, 0])\n self._check([0.0, 1.5, 0.0], [0, 342, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 338, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 339, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 340, 0])\n self._check([0.0, 1.5, 0.0], [0, 1023 - 341, 0])\n self._check([0.0, -1.0, 0.0], [0, 500, 0])\n self._check([1 / 32.0, -1.0, 1 / 32.0], [500, 500, 500])\n self._check([-1 / 32.0, -1.0, -1 / 32.0], [400, 400, 400])\n for idx in range(100):\n data = [random.randint(0, 1023) for r in range(3)]\n coeff = [max(-2.0, min(127 / 32.0, random.random() * 4 - 2)) for\n r in range(3)]\n self._check(coeff, data)\n",
"step-5": "from basetest import simtest\nimport testutil\nimport logging, random\nfrom nitro_parts.lib.imager import ccm as CCM\nimport numpy\n\n\n###############################################################################\nclass DotProductTest(simtest):\n\n def _set_coeff(self, c):\n cq = (c * 32).astype(numpy.uint8)\n self.dev.set(\"DotProductTest\",\"c0\", cq[0])\n self.dev.set(\"DotProductTest\",\"c1\", cq[1])\n self.dev.set(\"DotProductTest\",\"c2\", cq[2])\n return cq\n \n def _set_data(self, d):\n self.dev.set(\"DotProductTest\",\"d0\", d[0])\n self.dev.set(\"DotProductTest\",\"d1\", d[1])\n self.dev.set(\"DotProductTest\",\"d2\", d[2])\n\n def _check(self, c, d):\n d = numpy.array(d)\n ds = d.copy()\n ds[d>511] = d[d>511] - 1024\n\n c = numpy.array(c)\n cq = self._set_coeff(c)\n cs = cq.astype(numpy.int8)\n cs[cq>127] = cq[cq>127] - 256\n \n self._set_data(d)\n do = self.dev.get(\"DotProductTest\",\"datao_uu\")\n data_uu = min(1023, sum(cq * d)/32)\n #print \"UU\", do, data_uu\n self.assertTrue( do == data_uu, \"UU\")\n\n data_su = max(0, min(1023, sum(cs * d)/32))\n do = self.dev.get(\"DotProductTest\", \"datao_su\")\n #print \"SU\", d, cs, do, data_su\n self.assertTrue( do == data_su, \"SU \" + str(d) + \" \" + str(cs) + \" \" + str(do) + \" \" + str(data_su))\n\n data_us = max(-512, min(511, sum(cq * ds)/32))\n do = self.dev.get(\"DotProductTest\", \"datao_us\")\n if(do > 511): do = do - 1024\n #print \"US\", do, ds, data_us\n self.assertTrue( do == data_us, \"US\")\n\n data_ss = max(-512, min(511, sum(cs * ds)/32))\n do = self.dev.get(\"DotProductTest\", \"datao_ss\")\n if(do > 511): do = do - 1024\n #print \"SS\", ds, cq, cs, do, data_ss\n self.assertTrue( do == data_ss, \"SS\")\n \n def testDotProduct(self):\n \"\"\"Setup up Dot Product with various input and test output matches expectation.\"\"\"\n\n self._check([0.000, 1.000, 0.000], [ 1023, 1023, 1023 ])\n self._check([0.125, 0.750, 0.125], [ 1023, 1023, 1023 ])\n self._check([1/32., 1.000, 
0.000], [ 1023, 1023, 1023 ])\n self._check([1.000, 1.000, 1.000], [ 1023, 1023, 1023 ])\n self._check([0, 0, 0], [ 1023, 1023, 1023 ])\n self._check([1/32., 0, 0], [ 1, 100, 100 ])\n self._check([1.0, 0, 0], [ 1, 100, 100 ])\n self._check([0, 1.0, 0], [ 1, 100, 100 ])\n self._check([0, 0, 1.0], [ 1, 100, 100 ])\n self._check([1.000, 1.000, 1.000], [ 513, 513, 513 ])\n self._check([1.000, 1.000, 1.000], [ 512, 512, 512 ])\n self._check([1.000, 1.000, 1.000], [ 0, 512, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 680, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 681, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 682, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 683, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 339, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 340, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 341, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 342, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 1023-338, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-339, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-340, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-341, 0 ])\n\n self._check([0.000, -1.0, 0.000], [ 0, 500, 0 ])\n self._check([1/32., -1.0, 1/32.], [ 500, 500, 500 ])\n self._check([-1/32., -1.0, -1/32.], [ 400, 400, 400 ])\n\n for idx in range(100):\n data = [ random.randint(0,1023) for r in range(3) ]\n coeff = [ max(-2.0, min(127/32., random.random() * 4 - 2)) for r in range(3) ]\n #print coeff, data\n self._check(coeff, data)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def treeCounter(moveRight: int, moveDown: int) -> None:
    """Count and print the trees ('#') hit while descending the map.

    Walks the module-level ``data`` grid (a list of strings where '#'
    marks a tree — defined elsewhere in this file; TODO confirm at the
    caller) from the top-left corner, advancing ``moveRight`` columns and
    ``moveDown`` rows per step.  The column wraps modulo the row width
    because the pattern repeats infinitely to the right.  The count is
    printed rather than returned.
    """
    row = 0
    index = 0
    trees = 0
    finished = False
    while not finished:
        # Step down first, so the starting cell (0, 0) is never counted.
        row += moveDown
        if len(data) > row:
            # Horizontal wrap: the map tiles to the right.
            index = (index + moveRight) % len(data[row])
            if data[row][index] == '#':
                trees += 1
        else:
            # Walked past the bottom row: traversal complete.
            finished = True
    print(trees)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def treeCounter(moveRight, moveDown):
    """Print how many '#' cells the given slope hits descending the global map.

    Starting below the top-left corner, advance ``moveRight`` columns
    (wrapping around the row width) and ``moveDown`` rows per step until
    the bottom of the module-level ``data`` grid is passed.
    """
    trees = 0
    column = 0
    row = moveDown
    while row < len(data):
        # The map repeats to the right, so wrap the column index.
        column = (column + moveRight) % len(data[row])
        trees += data[row][column] == '#'
        row += moveDown
    print(trees)
# Evaluate every slope required by the puzzle, in the original order.
for _right, _down in ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2)):
    treeCounter(_right, _down)
<|reserved_special_token_1|>
data = ['........#.............#........',
'...#....#...#....#.............', '.#..#...#............#.....#..#',
'..#......#..##............###..', '..........#......#..#..#.......',
'.#..#.......#.........#.#......', '.........#..#....##..#.##....#.',
'..#....##...#..................', '##..........#.##...#....##..#..',
'...#....#...#..............#...', '...........................#..#',
'..##.##.#..................#...', '...#.##..#............#........',
'........#.......#...#.....##.#.', '.##..........#......#.......#..',
'...#..........#...#..#.......#.', '......#...#...#.##.......#.#...',
'........#...#...#...##.........', '#..............#.#....#.......#',
'..#..#..#.#....#...............', '.....#........#...#..........#.',
'##......#...#..#.##.......#....', '..#.#.....#.#.............#.#.#',
'#..#..##......##...#...........', '..#......#........#.....#......',
'.....#.......#....#.#...#......', '...#........#...........#...#..',
'.......#.#...........###....#..', '...#...........##....##........',
'#....#..####....#.....#..#....#', '..........#...........#........',
'...#.......#....#.#.........#..', '....#...#.......#..###.........',
'......#......#..#......#..#....', '...#.....#............#..#.....',
'...#.#.#.#..#.......#.....#....', '#....##...#.........#...##.....',
'#..#.......#..#..#..#...##.....', '#.......#............#.....#...',
'.#........##....##...#........#', '.....#...#.....................',
'.......#........#..............', '.....#............#.#.#...#.#..',
'.....##..#.............#.......', '..#.##..#........#..#...#......',
'.........#.#....#...........#..', '.#.....#..#....#.....#...#.....',
'....#.#................#.......', '...............##......#...#...',
'.##...#...#.......##.#....#....', '............#........#.......#.',
'......##.#.#...................', '.#.#..............#.......#....',
'#.....#...#.......#..#...#.....', '.............#....#..#......#..',
'........#...##................#', '.......#...#..#..##............',
'..#..#...##...#..#.#.....#...#.', '.#.#...#.........#.#...........',
'...###....#.......#...#........', '........#......##.#...#..##..#.',
'.....................#.#.......', '.............#...........#...#.',
'#..#..#.....#.#...#............', '...#....#.....#...........#....',
'..##.....##...#......#..##.....', '#.....#.....###.#.....#....##..',
'.#...........###...............', '..................#..##.#...#..',
'................#....##.#......', '.#.#.#...#....#.........#..#.#.',
'#.......#........##............', '.......##.#....#.#............#',
'..........#..##.#....#.........', '........##..#....#.............',
'.........#....#...........##...', '#.........#.#..#..#..........#.',
'.....#........#......#.........', '....#.#.#...............#......',
'.#..#..##...#.##..........#....', '..#....................#.#.....',
'.........#....#...........#.#.#', '........#....##.##.............',
'..#.....#.......#..#......#....', '#..........#.#.....#.#....#....',
'........##.#.....#..#.....#.#..', '...................#...#....#.#',
'............#..#....#...#...#..', '..............#.#.........#....',
'...#..#..#.#..##..##...........', '.#...........................#.',
'.#.......#...........#....#.#.#', '......#..#...#........#...##...',
'.........#......#.#.......#...#', '...#..##................#......',
'.............#.#..##....#.#....', '...............#..#......#.....',
'.#......#.#.#....#........#....', '........#..#.##..#..#.........#',
'...#....#.#...#..#.......#..#..', '..#...##.........#..#...#......',
'...#...........#.............#.', '....#.....................#....',
'.....#..#...............#.#...#', '....#..........#........#......',
'..#....#........##..##.........', '...#....#..#.#.......#...#.....',
'..#........#....#...##....#.#..', '.#...#........##.....#....###..',
'#....#....##......#........#...', '.........#..#.#..........#....#',
'....#...#.....#.......##.......', '..............#..........#.##..',
'#...#..#..............#......#.', '.................#......##....#',
'..#..##..#.......#..#.#......#.', '.............#........#.....#.#',
'.#.##............#..#..........', '..#...#...........#..##........',
'.#....#...#....#.......#.......', '...#.#..#..#..#....#.....#..#..',
'....#..##..............#...#...', '#..........###......###........',
'.##.##......#..#............#..', '.#...........#.#.....#...#.....',
'#.#..#...#............#........', '.........#...#...#..........##.',
'.......###..#..........#.......', '...........###.....#........#..',
'.#.............#.....#......#..', '...#.....#....#.#.........##...',
'....##..##...#.......##........', '......#....##.........#......#.',
'..........#.....##..#.....#..#.', '..........####...#..#.........#',
'.##....#..#.#...#.......#......', '...#.#.##.#.#...#....#.#.#.....',
'.........#...##........##.....#', '..#........#..........##...##.#',
'##...##..........#.#...........', '..............#......#.........',
'........#.....#.#.......#......', '.#...#.....#....#.#..#.........',
'.....#....................##...', '....#..................#.#...##',
'.....#............#..##........', '#..........#....#.#.......##.#.',
'....#..#.....................#.', '#..#....##.....#...............',
'..#...#..#..##....#.#..........', '.......#......#.#.......#.....#',
'...#.#.......#...#.##..........', '....#..........#....#.#.#......',
'.......#..#..........#..##.....', '#......#......#...#......#...#.',
'###..#....##......##........#..', '.#..........#.....#.......#.#..',
'.......#.....#.....#.#.........', '..#...#....#...................',
'..............#.##.............', '.#...#.......#.##...#.#.......#',
'.......#......................#', '....#.#...#.#........#.........',
'.#......#....#...#.............', '#.......#...###.....#.#.#..#...',
'#....##.#...............##.....', '..#.......#..................#.',
'.....####...............#......', '.##......#......#.#.......##.#.',
'#......##..###....#....#......#', '.##.......##.##...#.##.........',
'......##............#.......#..', '......#..#.....##.#............',
'.#..........#.....##...........', '#.........#......#......##.#...',
'.........#.......#..#......#.#.', '.........#.......#...........#.',
'.#..##.#..................##...', '.............#.............#...',
'.....##........#......##...##..', '..#..#.#.....#..#....#.........',
'.....#....#.....#.....#........', '#......##.....#....#....#......',
'#.................#..#.#......#', '.......#..#......#....#.#...#.#',
'....#.........#..#..........#.#', '##......#............#...#...#.',
'....##......#...#.....#....##..', '.#...##.........#..............',
'......#.....................#..', '..#..........###....#..........',
'#....#...#..#.............#....', '#........#.#......#....#.......',
'.#...#.......#..#...#.#...#..#.', '................##.#.....#.....',
'###.......#...#................', '...#.......#...#.#.....#.......',
'..#.........#.....#.#.......#..', '......#.......................#',
'#.....#.#..#....#.......#......', '...#....#..#....####...........',
'.............#.....#...##......', '.......#.........#...#..#......',
'.##..#.........#....#.#........', '....##...#.#...........#....#..',
'.........................##....', '..###.......##....#.#.........#',
'.#....#.#.#...........##....#..', '......#...#..#..#..#..#.......#',
'..#....#.#.......#..#..#..#...#', '.....##...#.##....#.#...#......',
'.........#..#....#..#..........', '.##..##.........#.#.....#......',
'..........#...##...#.#...#.....', '#.##..#..#.............#.......',
'...#...........#.......#......#', '.......#....#....#...##.......#',
'..#.##........###..#......#....', '...#...........###......#..#..#',
'.#.........#.#.........#.#.....', '##.......##.##.##......##......',
'............#...#..........#...', '....................#..........',
'...#..#...........#...#...#....', '.................#...#......###',
'...#................#.#.##.....', '...............#........#......',
'#.............##......#.#..#...', '..#.#.....#..#.##.....##...#...',
'......#.........#......#.......', '#.......#......#....#........#.',
'.#..##.....#.........#.........', '....##.##.#...#.........##.#...',
'...............#..#..#..##.....', '.#..#...............###........',
'.##............##..............', '...............#...##...#...#.#',
'..#.#......#.#..#.............#', '#.#..#..##.........#.#.#...#...',
'....##.#....................##.', '.........#..#.....#.....#..#..#',
'....#......#......#.##....#....', '........###..#.............#..#',
'##................#.........#..', '#.....#.......#....#...........',
'..#.......#..#........#....#...', '..#.#.##..#.#...##........#.##.',
'..#..........#............#....', '..........#...............##...',
'..........###........#.#.......', '.....###..#.............#......',
'##.............#...#.....#.....', '.....#......#....#........#.#..',
'............#..#..............#', '.................#...........##',
'#........#.........###.....#...', '..#.#..............##......#.#.',
'.#...........#.........#..##..#', '...............................',
'.#.....#..#....#....#......#...', '.#...#......#.#..#....#.......#',
'......#.##.......#......#......', '......#..###..#................',
'#..#.....#........##...#.......', '......##.........##....#...##..',
'.#..........#.................#', '#..#.......#...............#...',
'.........#..###....#.#.##.#....', '..#...#.##..##...............##',
'.........#.....................', '.#....##...#......#....#.......',
'............#..........#..#....', '...#......##....#....#........#',
'.#...................#.........', '#.#........###....#..........#.',
'.........#....#....#........##.', '.#....#..#.........#..#........',
'...............#..#...#..#...##', '.........#....##....#......#...',
'.#.............................', '...#........#...#.#...#.#..#...',
'.....#..##...#.#...............', '#.....#....#.........#.........',
'#...#...........##.........#...', '..##........#.#...#...#......#.',
'...........#.....#...#.#.......', '......###....#.....#...........',
'......##...#..........#....#.#.', '.......##..##..........#.......',
'....#............#..#....##....', '..##...................#.#.....',
'...#.#..#.#....................', '.#..##..#............##.###..#.',
'#.#...#....#.#..........#.#....', '........#....#.....#...........',
'..##....#...#.......#..........', '...........##.##....#..........',
'.....#............#............', '.......#.............#....#....',
'.................#......#......', '......##.......#....#..##...#..',
'.#..#....#.....................', '...#.#.#...#......##...........',
'##........##.#....#....#.......', '.......#.....#..#..#...#.##....',
'#..........#....#.#..#..#..#...', '...##..............#...........',
'.........#.....#.#....#.......#', '.........#....##..#..##..#.....',
'.....#......................#..', '...###...#..#......#...........',
'....#.....................#....', '...............................',
'..#.....###.......#..#....#....', '#..........#.................#.',
'......#.......###.......#..##..', '.............#.##..............',
'......#..#.#..#...........#....', '...#....##.#...#..#.#...#....#.',
'..................#...#....#.##', '......#.#....#.................',
'......#.#.....#.....#..##......', '#..##...........#..#.....#.##..']
def treeCounter(moveRight, moveDown, grid=None):
    """Count the trees ('#') hit while sledding down *grid* at a fixed slope.

    Starting at the top-left corner, advance ``moveRight`` columns and
    ``moveDown`` rows per step until the bottom of the grid is passed.
    The column index wraps modulo each row's width because the pattern
    repeats infinitely to the right.

    Args:
        moveRight: columns advanced per step.
        moveDown: rows advanced per step.
        grid: list of strings where '#' marks a tree; defaults to the
            module-level ``data`` map for backward compatibility.

    Returns:
        The number of trees encountered.  The count is also printed,
        preserving the original behavior for existing callers.
    """
    if grid is None:
        grid = data  # fall back to the puzzle input defined above
    index = 0
    trees = 0
    # The original sentinel-flag while loop is equivalent to stepping a
    # range: visit rows moveDown, 2*moveDown, ... until past the bottom.
    # The start cell (0, 0) is deliberately never counted.
    for row in range(moveDown, len(grid), moveDown):
        index = (index + moveRight) % len(grid[row])
        if grid[row][index] == '#':
            trees += 1
    print(trees)
    return trees
# Run the solver for each slope the puzzle asks about.
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
for slope in slopes:
    treeCounter(*slope)
<|reserved_special_token_1|>
data = [
"........#.............#........",
"...#....#...#....#.............",
".#..#...#............#.....#..#",
"..#......#..##............###..",
"..........#......#..#..#.......",
".#..#.......#.........#.#......",
".........#..#....##..#.##....#.",
"..#....##...#..................",
"##..........#.##...#....##..#..",
"...#....#...#..............#...",
"...........................#..#",
"..##.##.#..................#...",
"...#.##..#............#........",
"........#.......#...#.....##.#.",
".##..........#......#.......#..",
"...#..........#...#..#.......#.",
"......#...#...#.##.......#.#...",
"........#...#...#...##.........",
"#..............#.#....#.......#",
"..#..#..#.#....#...............",
".....#........#...#..........#.",
"##......#...#..#.##.......#....",
"..#.#.....#.#.............#.#.#",
"#..#..##......##...#...........",
"..#......#........#.....#......",
".....#.......#....#.#...#......",
"...#........#...........#...#..",
".......#.#...........###....#..",
"...#...........##....##........",
"#....#..####....#.....#..#....#",
"..........#...........#........",
"...#.......#....#.#.........#..",
"....#...#.......#..###.........",
"......#......#..#......#..#....",
"...#.....#............#..#.....",
"...#.#.#.#..#.......#.....#....",
"#....##...#.........#...##.....",
"#..#.......#..#..#..#...##.....",
"#.......#............#.....#...",
".#........##....##...#........#",
".....#...#.....................",
".......#........#..............",
".....#............#.#.#...#.#..",
".....##..#.............#.......",
"..#.##..#........#..#...#......",
".........#.#....#...........#..",
".#.....#..#....#.....#...#.....",
"....#.#................#.......",
"...............##......#...#...",
".##...#...#.......##.#....#....",
"............#........#.......#.",
"......##.#.#...................",
".#.#..............#.......#....",
"#.....#...#.......#..#...#.....",
".............#....#..#......#..",
"........#...##................#",
".......#...#..#..##............",
"..#..#...##...#..#.#.....#...#.",
".#.#...#.........#.#...........",
"...###....#.......#...#........",
"........#......##.#...#..##..#.",
".....................#.#.......",
".............#...........#...#.",
"#..#..#.....#.#...#............",
"...#....#.....#...........#....",
"..##.....##...#......#..##.....",
"#.....#.....###.#.....#....##..",
".#...........###...............",
"..................#..##.#...#..",
"................#....##.#......",
".#.#.#...#....#.........#..#.#.",
"#.......#........##............",
".......##.#....#.#............#",
"..........#..##.#....#.........",
"........##..#....#.............",
".........#....#...........##...",
"#.........#.#..#..#..........#.",
".....#........#......#.........",
"....#.#.#...............#......",
".#..#..##...#.##..........#....",
"..#....................#.#.....",
".........#....#...........#.#.#",
"........#....##.##.............",
"..#.....#.......#..#......#....",
"#..........#.#.....#.#....#....",
"........##.#.....#..#.....#.#..",
"...................#...#....#.#",
"............#..#....#...#...#..",
"..............#.#.........#....",
"...#..#..#.#..##..##...........",
".#...........................#.",
".#.......#...........#....#.#.#",
"......#..#...#........#...##...",
".........#......#.#.......#...#",
"...#..##................#......",
".............#.#..##....#.#....",
"...............#..#......#.....",
".#......#.#.#....#........#....",
"........#..#.##..#..#.........#",
"...#....#.#...#..#.......#..#..",
"..#...##.........#..#...#......",
"...#...........#.............#.",
"....#.....................#....",
".....#..#...............#.#...#",
"....#..........#........#......",
"..#....#........##..##.........",
"...#....#..#.#.......#...#.....",
"..#........#....#...##....#.#..",
".#...#........##.....#....###..",
"#....#....##......#........#...",
".........#..#.#..........#....#",
"....#...#.....#.......##.......",
"..............#..........#.##..",
"#...#..#..............#......#.",
".................#......##....#",
"..#..##..#.......#..#.#......#.",
".............#........#.....#.#",
".#.##............#..#..........",
"..#...#...........#..##........",
".#....#...#....#.......#.......",
"...#.#..#..#..#....#.....#..#..",
"....#..##..............#...#...",
"#..........###......###........",
".##.##......#..#............#..",
".#...........#.#.....#...#.....",
"#.#..#...#............#........",
".........#...#...#..........##.",
".......###..#..........#.......",
"...........###.....#........#..",
".#.............#.....#......#..",
"...#.....#....#.#.........##...",
"....##..##...#.......##........",
"......#....##.........#......#.",
"..........#.....##..#.....#..#.",
"..........####...#..#.........#",
".##....#..#.#...#.......#......",
"...#.#.##.#.#...#....#.#.#.....",
".........#...##........##.....#",
"..#........#..........##...##.#",
"##...##..........#.#...........",
"..............#......#.........",
"........#.....#.#.......#......",
".#...#.....#....#.#..#.........",
".....#....................##...",
"....#..................#.#...##",
".....#............#..##........",
"#..........#....#.#.......##.#.",
"....#..#.....................#.",
"#..#....##.....#...............",
"..#...#..#..##....#.#..........",
".......#......#.#.......#.....#",
"...#.#.......#...#.##..........",
"....#..........#....#.#.#......",
".......#..#..........#..##.....",
"#......#......#...#......#...#.",
"###..#....##......##........#..",
".#..........#.....#.......#.#..",
".......#.....#.....#.#.........",
"..#...#....#...................",
"..............#.##.............",
".#...#.......#.##...#.#.......#",
".......#......................#",
"....#.#...#.#........#.........",
".#......#....#...#.............",
"#.......#...###.....#.#.#..#...",
"#....##.#...............##.....",
"..#.......#..................#.",
".....####...............#......",
".##......#......#.#.......##.#.",
"#......##..###....#....#......#",
".##.......##.##...#.##.........",
"......##............#.......#..",
"......#..#.....##.#............",
".#..........#.....##...........",
"#.........#......#......##.#...",
".........#.......#..#......#.#.",
".........#.......#...........#.",
".#..##.#..................##...",
".............#.............#...",
".....##........#......##...##..",
"..#..#.#.....#..#....#.........",
".....#....#.....#.....#........",
"#......##.....#....#....#......",
"#.................#..#.#......#",
".......#..#......#....#.#...#.#",
"....#.........#..#..........#.#",
"##......#............#...#...#.",
"....##......#...#.....#....##..",
".#...##.........#..............",
"......#.....................#..",
"..#..........###....#..........",
"#....#...#..#.............#....",
"#........#.#......#....#.......",
".#...#.......#..#...#.#...#..#.",
"................##.#.....#.....",
"###.......#...#................",
"...#.......#...#.#.....#.......",
"..#.........#.....#.#.......#..",
"......#.......................#",
"#.....#.#..#....#.......#......",
"...#....#..#....####...........",
".............#.....#...##......",
".......#.........#...#..#......",
".##..#.........#....#.#........",
"....##...#.#...........#....#..",
".........................##....",
"..###.......##....#.#.........#",
".#....#.#.#...........##....#..",
"......#...#..#..#..#..#.......#",
"..#....#.#.......#..#..#..#...#",
".....##...#.##....#.#...#......",
".........#..#....#..#..........",
".##..##.........#.#.....#......",
"..........#...##...#.#...#.....",
"#.##..#..#.............#.......",
"...#...........#.......#......#",
".......#....#....#...##.......#",
"..#.##........###..#......#....",
"...#...........###......#..#..#",
".#.........#.#.........#.#.....",
"##.......##.##.##......##......",
"............#...#..........#...",
"....................#..........",
"...#..#...........#...#...#....",
".................#...#......###",
"...#................#.#.##.....",
"...............#........#......",
"#.............##......#.#..#...",
"..#.#.....#..#.##.....##...#...",
"......#.........#......#.......",
"#.......#......#....#........#.",
".#..##.....#.........#.........",
"....##.##.#...#.........##.#...",
"...............#..#..#..##.....",
".#..#...............###........",
".##............##..............",
"...............#...##...#...#.#",
"..#.#......#.#..#.............#",
"#.#..#..##.........#.#.#...#...",
"....##.#....................##.",
".........#..#.....#.....#..#..#",
"....#......#......#.##....#....",
"........###..#.............#..#",
"##................#.........#..",
"#.....#.......#....#...........",
"..#.......#..#........#....#...",
"..#.#.##..#.#...##........#.##.",
"..#..........#............#....",
"..........#...............##...",
"..........###........#.#.......",
".....###..#.............#......",
"##.............#...#.....#.....",
".....#......#....#........#.#..",
"............#..#..............#",
".................#...........##",
"#........#.........###.....#...",
"..#.#..............##......#.#.",
".#...........#.........#..##..#",
"...............................",
".#.....#..#....#....#......#...",
".#...#......#.#..#....#.......#",
"......#.##.......#......#......",
"......#..###..#................",
"#..#.....#........##...#.......",
"......##.........##....#...##..",
".#..........#.................#",
"#..#.......#...............#...",
".........#..###....#.#.##.#....",
"..#...#.##..##...............##",
".........#.....................",
".#....##...#......#....#.......",
"............#..........#..#....",
"...#......##....#....#........#",
".#...................#.........",
"#.#........###....#..........#.",
".........#....#....#........##.",
".#....#..#.........#..#........",
"...............#..#...#..#...##",
".........#....##....#......#...",
".#.............................",
"...#........#...#.#...#.#..#...",
".....#..##...#.#...............",
"#.....#....#.........#.........",
"#...#...........##.........#...",
"..##........#.#...#...#......#.",
"...........#.....#...#.#.......",
"......###....#.....#...........",
"......##...#..........#....#.#.",
".......##..##..........#.......",
"....#............#..#....##....",
"..##...................#.#.....",
"...#.#..#.#....................",
".#..##..#............##.###..#.",
"#.#...#....#.#..........#.#....",
"........#....#.....#...........",
"..##....#...#.......#..........",
"...........##.##....#..........",
".....#............#............",
".......#.............#....#....",
".................#......#......",
"......##.......#....#..##...#..",
".#..#....#.....................",
"...#.#.#...#......##...........",
"##........##.#....#....#.......",
".......#.....#..#..#...#.##....",
"#..........#....#.#..#..#..#...",
"...##..............#...........",
".........#.....#.#....#.......#",
".........#....##..#..##..#.....",
".....#......................#..",
"...###...#..#......#...........",
"....#.....................#....",
"...............................",
"..#.....###.......#..#....#....",
"#..........#.................#.",
"......#.......###.......#..##..",
".............#.##..............",
"......#..#.#..#...........#....",
"...#....##.#...#..#.#...#....#.",
"..................#...#....#.##",
"......#.#....#.................",
"......#.#.....#.....#..##......",
"#..##...........#..#.....#.##..",
]
def treeCounter(moveRight, moveDown, grid=None):
    """Count the '#' trees hit while sledding down *grid* on a fixed slope.

    Starting from the top-left cell, repeatedly advance ``moveRight``
    columns (wrapping around, because the pattern repeats horizontally)
    and ``moveDown`` rows, until the bottom edge is passed.

    Args:
        moveRight: Columns to advance per step.
        moveDown: Rows to advance per step.
        grid: Optional list of equal-width strings of '.'/'#' cells.
            Defaults to the module-level ``data`` map so existing
            callers keep working unchanged.

    Returns:
        The number of trees encountered.  The count is also printed,
        preserving the original script's console output.
    """
    rows = data if grid is None else grid
    row = 0
    index = 0
    trees = 0
    while True:
        row += moveDown
        if row >= len(rows):
            break
        # Wrap the column index: the map tiles infinitely to the right.
        index = (index + moveRight) % len(rows[row])
        if rows[row][index] == '#':
            trees += 1
    print(trees)
    return trees
# Evaluate every required (right, down) slope in order; each call
# prints its own tree count.
for _right, _down in ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2)):
    treeCounter(_right, _down)
|
flexible
|
{
"blob_id": "c22651437094723b711a959e031f1c7f928f735a",
"index": 7645,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef treeCounter(moveRight, moveDown):\n row = 0\n index = 0\n trees = 0\n finished = False\n while not finished:\n row += moveDown\n if len(data) > row:\n index = (index + moveRight) % len(data[row])\n if data[row][index] == '#':\n trees += 1\n else:\n finished = True\n print(trees)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef treeCounter(moveRight, moveDown):\n row = 0\n index = 0\n trees = 0\n finished = False\n while not finished:\n row += moveDown\n if len(data) > row:\n index = (index + moveRight) % len(data[row])\n if data[row][index] == '#':\n trees += 1\n else:\n finished = True\n print(trees)\n\n\ntreeCounter(1, 1)\ntreeCounter(3, 1)\ntreeCounter(5, 1)\ntreeCounter(7, 1)\ntreeCounter(1, 2)\n",
"step-4": "data = ['........#.............#........',\n '...#....#...#....#.............', '.#..#...#............#.....#..#',\n '..#......#..##............###..', '..........#......#..#..#.......',\n '.#..#.......#.........#.#......', '.........#..#....##..#.##....#.',\n '..#....##...#..................', '##..........#.##...#....##..#..',\n '...#....#...#..............#...', '...........................#..#',\n '..##.##.#..................#...', '...#.##..#............#........',\n '........#.......#...#.....##.#.', '.##..........#......#.......#..',\n '...#..........#...#..#.......#.', '......#...#...#.##.......#.#...',\n '........#...#...#...##.........', '#..............#.#....#.......#',\n '..#..#..#.#....#...............', '.....#........#...#..........#.',\n '##......#...#..#.##.......#....', '..#.#.....#.#.............#.#.#',\n '#..#..##......##...#...........', '..#......#........#.....#......',\n '.....#.......#....#.#...#......', '...#........#...........#...#..',\n '.......#.#...........###....#..', '...#...........##....##........',\n '#....#..####....#.....#..#....#', '..........#...........#........',\n '...#.......#....#.#.........#..', '....#...#.......#..###.........',\n '......#......#..#......#..#....', '...#.....#............#..#.....',\n '...#.#.#.#..#.......#.....#....', '#....##...#.........#...##.....',\n '#..#.......#..#..#..#...##.....', '#.......#............#.....#...',\n '.#........##....##...#........#', '.....#...#.....................',\n '.......#........#..............', '.....#............#.#.#...#.#..',\n '.....##..#.............#.......', '..#.##..#........#..#...#......',\n '.........#.#....#...........#..', '.#.....#..#....#.....#...#.....',\n '....#.#................#.......', '...............##......#...#...',\n '.##...#...#.......##.#....#....', '............#........#.......#.',\n '......##.#.#...................', '.#.#..............#.......#....',\n '#.....#...#.......#..#...#.....', '.............#....#..#......#..',\n 
'........#...##................#', '.......#...#..#..##............',\n '..#..#...##...#..#.#.....#...#.', '.#.#...#.........#.#...........',\n '...###....#.......#...#........', '........#......##.#...#..##..#.',\n '.....................#.#.......', '.............#...........#...#.',\n '#..#..#.....#.#...#............', '...#....#.....#...........#....',\n '..##.....##...#......#..##.....', '#.....#.....###.#.....#....##..',\n '.#...........###...............', '..................#..##.#...#..',\n '................#....##.#......', '.#.#.#...#....#.........#..#.#.',\n '#.......#........##............', '.......##.#....#.#............#',\n '..........#..##.#....#.........', '........##..#....#.............',\n '.........#....#...........##...', '#.........#.#..#..#..........#.',\n '.....#........#......#.........', '....#.#.#...............#......',\n '.#..#..##...#.##..........#....', '..#....................#.#.....',\n '.........#....#...........#.#.#', '........#....##.##.............',\n '..#.....#.......#..#......#....', '#..........#.#.....#.#....#....',\n '........##.#.....#..#.....#.#..', '...................#...#....#.#',\n '............#..#....#...#...#..', '..............#.#.........#....',\n '...#..#..#.#..##..##...........', '.#...........................#.',\n '.#.......#...........#....#.#.#', '......#..#...#........#...##...',\n '.........#......#.#.......#...#', '...#..##................#......',\n '.............#.#..##....#.#....', '...............#..#......#.....',\n '.#......#.#.#....#........#....', '........#..#.##..#..#.........#',\n '...#....#.#...#..#.......#..#..', '..#...##.........#..#...#......',\n '...#...........#.............#.', '....#.....................#....',\n '.....#..#...............#.#...#', '....#..........#........#......',\n '..#....#........##..##.........', '...#....#..#.#.......#...#.....',\n '..#........#....#...##....#.#..', '.#...#........##.....#....###..',\n '#....#....##......#........#...', 
'.........#..#.#..........#....#',\n '....#...#.....#.......##.......', '..............#..........#.##..',\n '#...#..#..............#......#.', '.................#......##....#',\n '..#..##..#.......#..#.#......#.', '.............#........#.....#.#',\n '.#.##............#..#..........', '..#...#...........#..##........',\n '.#....#...#....#.......#.......', '...#.#..#..#..#....#.....#..#..',\n '....#..##..............#...#...', '#..........###......###........',\n '.##.##......#..#............#..', '.#...........#.#.....#...#.....',\n '#.#..#...#............#........', '.........#...#...#..........##.',\n '.......###..#..........#.......', '...........###.....#........#..',\n '.#.............#.....#......#..', '...#.....#....#.#.........##...',\n '....##..##...#.......##........', '......#....##.........#......#.',\n '..........#.....##..#.....#..#.', '..........####...#..#.........#',\n '.##....#..#.#...#.......#......', '...#.#.##.#.#...#....#.#.#.....',\n '.........#...##........##.....#', '..#........#..........##...##.#',\n '##...##..........#.#...........', '..............#......#.........',\n '........#.....#.#.......#......', '.#...#.....#....#.#..#.........',\n '.....#....................##...', '....#..................#.#...##',\n '.....#............#..##........', '#..........#....#.#.......##.#.',\n '....#..#.....................#.', '#..#....##.....#...............',\n '..#...#..#..##....#.#..........', '.......#......#.#.......#.....#',\n '...#.#.......#...#.##..........', '....#..........#....#.#.#......',\n '.......#..#..........#..##.....', '#......#......#...#......#...#.',\n '###..#....##......##........#..', '.#..........#.....#.......#.#..',\n '.......#.....#.....#.#.........', '..#...#....#...................',\n '..............#.##.............', '.#...#.......#.##...#.#.......#',\n '.......#......................#', '....#.#...#.#........#.........',\n '.#......#....#...#.............', '#.......#...###.....#.#.#..#...',\n 
'#....##.#...............##.....', '..#.......#..................#.',\n '.....####...............#......', '.##......#......#.#.......##.#.',\n '#......##..###....#....#......#', '.##.......##.##...#.##.........',\n '......##............#.......#..', '......#..#.....##.#............',\n '.#..........#.....##...........', '#.........#......#......##.#...',\n '.........#.......#..#......#.#.', '.........#.......#...........#.',\n '.#..##.#..................##...', '.............#.............#...',\n '.....##........#......##...##..', '..#..#.#.....#..#....#.........',\n '.....#....#.....#.....#........', '#......##.....#....#....#......',\n '#.................#..#.#......#', '.......#..#......#....#.#...#.#',\n '....#.........#..#..........#.#', '##......#............#...#...#.',\n '....##......#...#.....#....##..', '.#...##.........#..............',\n '......#.....................#..', '..#..........###....#..........',\n '#....#...#..#.............#....', '#........#.#......#....#.......',\n '.#...#.......#..#...#.#...#..#.', '................##.#.....#.....',\n '###.......#...#................', '...#.......#...#.#.....#.......',\n '..#.........#.....#.#.......#..', '......#.......................#',\n '#.....#.#..#....#.......#......', '...#....#..#....####...........',\n '.............#.....#...##......', '.......#.........#...#..#......',\n '.##..#.........#....#.#........', '....##...#.#...........#....#..',\n '.........................##....', '..###.......##....#.#.........#',\n '.#....#.#.#...........##....#..', '......#...#..#..#..#..#.......#',\n '..#....#.#.......#..#..#..#...#', '.....##...#.##....#.#...#......',\n '.........#..#....#..#..........', '.##..##.........#.#.....#......',\n '..........#...##...#.#...#.....', '#.##..#..#.............#.......',\n '...#...........#.......#......#', '.......#....#....#...##.......#',\n '..#.##........###..#......#....', '...#...........###......#..#..#',\n '.#.........#.#.........#.#.....', 
'##.......##.##.##......##......',\n '............#...#..........#...', '....................#..........',\n '...#..#...........#...#...#....', '.................#...#......###',\n '...#................#.#.##.....', '...............#........#......',\n '#.............##......#.#..#...', '..#.#.....#..#.##.....##...#...',\n '......#.........#......#.......', '#.......#......#....#........#.',\n '.#..##.....#.........#.........', '....##.##.#...#.........##.#...',\n '...............#..#..#..##.....', '.#..#...............###........',\n '.##............##..............', '...............#...##...#...#.#',\n '..#.#......#.#..#.............#', '#.#..#..##.........#.#.#...#...',\n '....##.#....................##.', '.........#..#.....#.....#..#..#',\n '....#......#......#.##....#....', '........###..#.............#..#',\n '##................#.........#..', '#.....#.......#....#...........',\n '..#.......#..#........#....#...', '..#.#.##..#.#...##........#.##.',\n '..#..........#............#....', '..........#...............##...',\n '..........###........#.#.......', '.....###..#.............#......',\n '##.............#...#.....#.....', '.....#......#....#........#.#..',\n '............#..#..............#', '.................#...........##',\n '#........#.........###.....#...', '..#.#..............##......#.#.',\n '.#...........#.........#..##..#', '...............................',\n '.#.....#..#....#....#......#...', '.#...#......#.#..#....#.......#',\n '......#.##.......#......#......', '......#..###..#................',\n '#..#.....#........##...#.......', '......##.........##....#...##..',\n '.#..........#.................#', '#..#.......#...............#...',\n '.........#..###....#.#.##.#....', '..#...#.##..##...............##',\n '.........#.....................', '.#....##...#......#....#.......',\n '............#..........#..#....', '...#......##....#....#........#',\n '.#...................#.........', '#.#........###....#..........#.',\n 
'.........#....#....#........##.', '.#....#..#.........#..#........',\n '...............#..#...#..#...##', '.........#....##....#......#...',\n '.#.............................', '...#........#...#.#...#.#..#...',\n '.....#..##...#.#...............', '#.....#....#.........#.........',\n '#...#...........##.........#...', '..##........#.#...#...#......#.',\n '...........#.....#...#.#.......', '......###....#.....#...........',\n '......##...#..........#....#.#.', '.......##..##..........#.......',\n '....#............#..#....##....', '..##...................#.#.....',\n '...#.#..#.#....................', '.#..##..#............##.###..#.',\n '#.#...#....#.#..........#.#....', '........#....#.....#...........',\n '..##....#...#.......#..........', '...........##.##....#..........',\n '.....#............#............', '.......#.............#....#....',\n '.................#......#......', '......##.......#....#..##...#..',\n '.#..#....#.....................', '...#.#.#...#......##...........',\n '##........##.#....#....#.......', '.......#.....#..#..#...#.##....',\n '#..........#....#.#..#..#..#...', '...##..............#...........',\n '.........#.....#.#....#.......#', '.........#....##..#..##..#.....',\n '.....#......................#..', '...###...#..#......#...........',\n '....#.....................#....', '...............................',\n '..#.....###.......#..#....#....', '#..........#.................#.',\n '......#.......###.......#..##..', '.............#.##..............',\n '......#..#.#..#...........#....', '...#....##.#...#..#.#...#....#.',\n '..................#...#....#.##', '......#.#....#.................',\n '......#.#.....#.....#..##......', '#..##...........#..#.....#.##..']\n\n\ndef treeCounter(moveRight, moveDown):\n row = 0\n index = 0\n trees = 0\n finished = False\n while not finished:\n row += moveDown\n if len(data) > row:\n index = (index + moveRight) % len(data[row])\n if data[row][index] == '#':\n trees += 1\n else:\n finished = 
True\n print(trees)\n\n\ntreeCounter(1, 1)\ntreeCounter(3, 1)\ntreeCounter(5, 1)\ntreeCounter(7, 1)\ntreeCounter(1, 2)\n",
"step-5": "data = [\n \"........#.............#........\",\n \"...#....#...#....#.............\",\n \".#..#...#............#.....#..#\",\n \"..#......#..##............###..\",\n \"..........#......#..#..#.......\",\n \".#..#.......#.........#.#......\",\n \".........#..#....##..#.##....#.\",\n \"..#....##...#..................\",\n \"##..........#.##...#....##..#..\",\n \"...#....#...#..............#...\",\n \"...........................#..#\",\n \"..##.##.#..................#...\",\n \"...#.##..#............#........\",\n \"........#.......#...#.....##.#.\",\n \".##..........#......#.......#..\",\n \"...#..........#...#..#.......#.\",\n \"......#...#...#.##.......#.#...\",\n \"........#...#...#...##.........\",\n \"#..............#.#....#.......#\",\n \"..#..#..#.#....#...............\",\n \".....#........#...#..........#.\",\n \"##......#...#..#.##.......#....\",\n \"..#.#.....#.#.............#.#.#\",\n \"#..#..##......##...#...........\",\n \"..#......#........#.....#......\",\n \".....#.......#....#.#...#......\",\n \"...#........#...........#...#..\",\n \".......#.#...........###....#..\",\n \"...#...........##....##........\",\n \"#....#..####....#.....#..#....#\",\n \"..........#...........#........\",\n \"...#.......#....#.#.........#..\",\n \"....#...#.......#..###.........\",\n \"......#......#..#......#..#....\",\n \"...#.....#............#..#.....\",\n \"...#.#.#.#..#.......#.....#....\",\n \"#....##...#.........#...##.....\",\n \"#..#.......#..#..#..#...##.....\",\n \"#.......#............#.....#...\",\n \".#........##....##...#........#\",\n \".....#...#.....................\",\n \".......#........#..............\",\n \".....#............#.#.#...#.#..\",\n \".....##..#.............#.......\",\n \"..#.##..#........#..#...#......\",\n \".........#.#....#...........#..\",\n \".#.....#..#....#.....#...#.....\",\n \"....#.#................#.......\",\n \"...............##......#...#...\",\n \".##...#...#.......##.#....#....\",\n 
\"............#........#.......#.\",\n \"......##.#.#...................\",\n \".#.#..............#.......#....\",\n \"#.....#...#.......#..#...#.....\",\n \".............#....#..#......#..\",\n \"........#...##................#\",\n \".......#...#..#..##............\",\n \"..#..#...##...#..#.#.....#...#.\",\n \".#.#...#.........#.#...........\",\n \"...###....#.......#...#........\",\n \"........#......##.#...#..##..#.\",\n \".....................#.#.......\",\n \".............#...........#...#.\",\n \"#..#..#.....#.#...#............\",\n \"...#....#.....#...........#....\",\n \"..##.....##...#......#..##.....\",\n \"#.....#.....###.#.....#....##..\",\n \".#...........###...............\",\n \"..................#..##.#...#..\",\n \"................#....##.#......\",\n \".#.#.#...#....#.........#..#.#.\",\n \"#.......#........##............\",\n \".......##.#....#.#............#\",\n \"..........#..##.#....#.........\",\n \"........##..#....#.............\",\n \".........#....#...........##...\",\n \"#.........#.#..#..#..........#.\",\n \".....#........#......#.........\",\n \"....#.#.#...............#......\",\n \".#..#..##...#.##..........#....\",\n \"..#....................#.#.....\",\n \".........#....#...........#.#.#\",\n \"........#....##.##.............\",\n \"..#.....#.......#..#......#....\",\n \"#..........#.#.....#.#....#....\",\n \"........##.#.....#..#.....#.#..\",\n \"...................#...#....#.#\",\n \"............#..#....#...#...#..\",\n \"..............#.#.........#....\",\n \"...#..#..#.#..##..##...........\",\n \".#...........................#.\",\n \".#.......#...........#....#.#.#\",\n \"......#..#...#........#...##...\",\n \".........#......#.#.......#...#\",\n \"...#..##................#......\",\n \".............#.#..##....#.#....\",\n \"...............#..#......#.....\",\n \".#......#.#.#....#........#....\",\n \"........#..#.##..#..#.........#\",\n \"...#....#.#...#..#.......#..#..\",\n \"..#...##.........#..#...#......\",\n 
\"...#...........#.............#.\",\n \"....#.....................#....\",\n \".....#..#...............#.#...#\",\n \"....#..........#........#......\",\n \"..#....#........##..##.........\",\n \"...#....#..#.#.......#...#.....\",\n \"..#........#....#...##....#.#..\",\n \".#...#........##.....#....###..\",\n \"#....#....##......#........#...\",\n \".........#..#.#..........#....#\",\n \"....#...#.....#.......##.......\",\n \"..............#..........#.##..\",\n \"#...#..#..............#......#.\",\n \".................#......##....#\",\n \"..#..##..#.......#..#.#......#.\",\n \".............#........#.....#.#\",\n \".#.##............#..#..........\",\n \"..#...#...........#..##........\",\n \".#....#...#....#.......#.......\",\n \"...#.#..#..#..#....#.....#..#..\",\n \"....#..##..............#...#...\",\n \"#..........###......###........\",\n \".##.##......#..#............#..\",\n \".#...........#.#.....#...#.....\",\n \"#.#..#...#............#........\",\n \".........#...#...#..........##.\",\n \".......###..#..........#.......\",\n \"...........###.....#........#..\",\n \".#.............#.....#......#..\",\n \"...#.....#....#.#.........##...\",\n \"....##..##...#.......##........\",\n \"......#....##.........#......#.\",\n \"..........#.....##..#.....#..#.\",\n \"..........####...#..#.........#\",\n \".##....#..#.#...#.......#......\",\n \"...#.#.##.#.#...#....#.#.#.....\",\n \".........#...##........##.....#\",\n \"..#........#..........##...##.#\",\n \"##...##..........#.#...........\",\n \"..............#......#.........\",\n \"........#.....#.#.......#......\",\n \".#...#.....#....#.#..#.........\",\n \".....#....................##...\",\n \"....#..................#.#...##\",\n \".....#............#..##........\",\n \"#..........#....#.#.......##.#.\",\n \"....#..#.....................#.\",\n \"#..#....##.....#...............\",\n \"..#...#..#..##....#.#..........\",\n \".......#......#.#.......#.....#\",\n \"...#.#.......#...#.##..........\",\n 
\"....#..........#....#.#.#......\",\n \".......#..#..........#..##.....\",\n \"#......#......#...#......#...#.\",\n \"###..#....##......##........#..\",\n \".#..........#.....#.......#.#..\",\n \".......#.....#.....#.#.........\",\n \"..#...#....#...................\",\n \"..............#.##.............\",\n \".#...#.......#.##...#.#.......#\",\n \".......#......................#\",\n \"....#.#...#.#........#.........\",\n \".#......#....#...#.............\",\n \"#.......#...###.....#.#.#..#...\",\n \"#....##.#...............##.....\",\n \"..#.......#..................#.\",\n \".....####...............#......\",\n \".##......#......#.#.......##.#.\",\n \"#......##..###....#....#......#\",\n \".##.......##.##...#.##.........\",\n \"......##............#.......#..\",\n \"......#..#.....##.#............\",\n \".#..........#.....##...........\",\n \"#.........#......#......##.#...\",\n \".........#.......#..#......#.#.\",\n \".........#.......#...........#.\",\n \".#..##.#..................##...\",\n \".............#.............#...\",\n \".....##........#......##...##..\",\n \"..#..#.#.....#..#....#.........\",\n \".....#....#.....#.....#........\",\n \"#......##.....#....#....#......\",\n \"#.................#..#.#......#\",\n \".......#..#......#....#.#...#.#\",\n \"....#.........#..#..........#.#\",\n \"##......#............#...#...#.\",\n \"....##......#...#.....#....##..\",\n \".#...##.........#..............\",\n \"......#.....................#..\",\n \"..#..........###....#..........\",\n \"#....#...#..#.............#....\",\n \"#........#.#......#....#.......\",\n \".#...#.......#..#...#.#...#..#.\",\n \"................##.#.....#.....\",\n \"###.......#...#................\",\n \"...#.......#...#.#.....#.......\",\n \"..#.........#.....#.#.......#..\",\n \"......#.......................#\",\n \"#.....#.#..#....#.......#......\",\n \"...#....#..#....####...........\",\n \".............#.....#...##......\",\n \".......#.........#...#..#......\",\n 
\".##..#.........#....#.#........\",\n \"....##...#.#...........#....#..\",\n \".........................##....\",\n \"..###.......##....#.#.........#\",\n \".#....#.#.#...........##....#..\",\n \"......#...#..#..#..#..#.......#\",\n \"..#....#.#.......#..#..#..#...#\",\n \".....##...#.##....#.#...#......\",\n \".........#..#....#..#..........\",\n \".##..##.........#.#.....#......\",\n \"..........#...##...#.#...#.....\",\n \"#.##..#..#.............#.......\",\n \"...#...........#.......#......#\",\n \".......#....#....#...##.......#\",\n \"..#.##........###..#......#....\",\n \"...#...........###......#..#..#\",\n \".#.........#.#.........#.#.....\",\n \"##.......##.##.##......##......\",\n \"............#...#..........#...\",\n \"....................#..........\",\n \"...#..#...........#...#...#....\",\n \".................#...#......###\",\n \"...#................#.#.##.....\",\n \"...............#........#......\",\n \"#.............##......#.#..#...\",\n \"..#.#.....#..#.##.....##...#...\",\n \"......#.........#......#.......\",\n \"#.......#......#....#........#.\",\n \".#..##.....#.........#.........\",\n \"....##.##.#...#.........##.#...\",\n \"...............#..#..#..##.....\",\n \".#..#...............###........\",\n \".##............##..............\",\n \"...............#...##...#...#.#\",\n \"..#.#......#.#..#.............#\",\n \"#.#..#..##.........#.#.#...#...\",\n \"....##.#....................##.\",\n \".........#..#.....#.....#..#..#\",\n \"....#......#......#.##....#....\",\n \"........###..#.............#..#\",\n \"##................#.........#..\",\n \"#.....#.......#....#...........\",\n \"..#.......#..#........#....#...\",\n \"..#.#.##..#.#...##........#.##.\",\n \"..#..........#............#....\",\n \"..........#...............##...\",\n \"..........###........#.#.......\",\n \".....###..#.............#......\",\n \"##.............#...#.....#.....\",\n \".....#......#....#........#.#..\",\n \"............#..#..............#\",\n 
\".................#...........##\",\n \"#........#.........###.....#...\",\n \"..#.#..............##......#.#.\",\n \".#...........#.........#..##..#\",\n \"...............................\",\n \".#.....#..#....#....#......#...\",\n \".#...#......#.#..#....#.......#\",\n \"......#.##.......#......#......\",\n \"......#..###..#................\",\n \"#..#.....#........##...#.......\",\n \"......##.........##....#...##..\",\n \".#..........#.................#\",\n \"#..#.......#...............#...\",\n \".........#..###....#.#.##.#....\",\n \"..#...#.##..##...............##\",\n \".........#.....................\",\n \".#....##...#......#....#.......\",\n \"............#..........#..#....\",\n \"...#......##....#....#........#\",\n \".#...................#.........\",\n \"#.#........###....#..........#.\",\n \".........#....#....#........##.\",\n \".#....#..#.........#..#........\",\n \"...............#..#...#..#...##\",\n \".........#....##....#......#...\",\n \".#.............................\",\n \"...#........#...#.#...#.#..#...\",\n \".....#..##...#.#...............\",\n \"#.....#....#.........#.........\",\n \"#...#...........##.........#...\",\n \"..##........#.#...#...#......#.\",\n \"...........#.....#...#.#.......\",\n \"......###....#.....#...........\",\n \"......##...#..........#....#.#.\",\n \".......##..##..........#.......\",\n \"....#............#..#....##....\",\n \"..##...................#.#.....\",\n \"...#.#..#.#....................\",\n \".#..##..#............##.###..#.\",\n \"#.#...#....#.#..........#.#....\",\n \"........#....#.....#...........\",\n \"..##....#...#.......#..........\",\n \"...........##.##....#..........\",\n \".....#............#............\",\n \".......#.............#....#....\",\n \".................#......#......\",\n \"......##.......#....#..##...#..\",\n \".#..#....#.....................\",\n \"...#.#.#...#......##...........\",\n \"##........##.#....#....#.......\",\n \".......#.....#..#..#...#.##....\",\n 
\"#..........#....#.#..#..#..#...\",\n \"...##..............#...........\",\n \".........#.....#.#....#.......#\",\n \".........#....##..#..##..#.....\",\n \".....#......................#..\",\n \"...###...#..#......#...........\",\n \"....#.....................#....\",\n \"...............................\",\n \"..#.....###.......#..#....#....\",\n \"#..........#.................#.\",\n \"......#.......###.......#..##..\",\n \".............#.##..............\",\n \"......#..#.#..#...........#....\",\n \"...#....##.#...#..#.#...#....#.\",\n \"..................#...#....#.##\",\n \"......#.#....#.................\",\n \"......#.#.....#.....#..##......\",\n \"#..##...........#..#.....#.##..\",\n]\n\ndef treeCounter(moveRight, moveDown):\n\n row = 0\n index = 0\n trees = 0\n\n finished = False\n\n while not finished:\n\n row += moveDown\n if len(data) > row:\n index = (index + moveRight) % len(data[row])\n if data[row][index] == '#':\n trees += 1\n else:\n finished = True\n\n print(trees)\n\n\ntreeCounter(1,1)\ntreeCounter(3,1)\ntreeCounter(5,1)\ntreeCounter(7,1)\ntreeCounter(1,2)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Quick OpenCV sanity check: load an image in color mode and dump basic facts.
import cv2

SEPARATOR = "~~~~~~~~~~~~~~~"

print(cv2.__version__)
# flag 1 = cv2.IMREAD_COLOR; result is a BGR ndarray (None if the file is missing)
image = cv2.imread("download.jpeg", 1)
print(image)
print(image.shape)
print(image[0])
print(SEPARATOR)
print(image.shape[0])
print(SEPARATOR)
print(len(image))
|
normal
|
{
"blob_id": "0b0ae6101fd80bdbcf37b935268f3e49230599fb",
"index": 5715,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(cv2.__version__)\n<mask token>\nprint(image)\nprint(image.shape)\nprint(image[0])\nprint('~~~~~~~~~~~~~~~')\nprint(image.shape[0])\nprint('~~~~~~~~~~~~~~~')\nprint(len(image))\n",
"step-3": "<mask token>\nprint(cv2.__version__)\nimage = cv2.imread('download.jpeg', 1)\nprint(image)\nprint(image.shape)\nprint(image[0])\nprint('~~~~~~~~~~~~~~~')\nprint(image.shape[0])\nprint('~~~~~~~~~~~~~~~')\nprint(len(image))\n",
"step-4": "import cv2\nprint(cv2.__version__)\nimage = cv2.imread('download.jpeg', 1)\nprint(image)\nprint(image.shape)\nprint(image[0])\nprint('~~~~~~~~~~~~~~~')\nprint(image.shape[0])\nprint('~~~~~~~~~~~~~~~')\nprint(len(image))\n",
"step-5": "import cv2\nprint(cv2.__version__)\n\nimage = cv2.imread(\"download.jpeg\", 1)\nprint(image)\nprint(image.shape)\n\nprint(image[0])\nprint(\"~~~~~~~~~~~~~~~\")\nprint(image.shape[0])\nprint(\"~~~~~~~~~~~~~~~\")\nprint(len(image))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@log.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form on GET; check hard-coded credentials on POST."""
    print(request.path)  # debug trace of the requested path
    if request.method == 'GET':
        return render_template('exec/login.html')
    else:
        username = request.form.get('username')
        password = request.form.get('password')
        # NOTE(review): credentials are hard-coded -- replace with a real user store.
        if username == 'henry' and password == '123':
            session['username'] = 'henry'
            return redirect('/detail')
        return 'Failed'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log = Blueprint('login', __name__)
@log.route('/login', methods=['GET', 'POST'])
def login():
print(request.path)
if request.method == 'GET':
return render_template('exec/login.html')
else:
username = request.form.get('username')
password = request.form.get('password')
if username == 'henry' and password == '123':
session['username'] = 'henry'
return redirect('/detail')
return 'Failed'
<|reserved_special_token_1|>
from flask import Blueprint, request, render_template, session, redirect
log = Blueprint('login', __name__)
@log.route('/login', methods=['GET', 'POST'])
def login():
print(request.path)
if request.method == 'GET':
return render_template('exec/login.html')
else:
username = request.form.get('username')
password = request.form.get('password')
if username == 'henry' and password == '123':
session['username'] = 'henry'
return redirect('/detail')
return 'Failed'
<|reserved_special_token_1|>
from flask import Blueprint, request, render_template, session, redirect
# Blueprint holding the login routes; registered on the app elsewhere.
log = Blueprint('login', __name__, )
@log.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form on GET; check hard-coded credentials on POST."""
    print(request.path, )  # debug trace of the requested path
    if request.method == 'GET':
        return render_template('exec/login.html')
    else:
        username = request.form.get('username')
        password = request.form.get('password')
        # NOTE(review): credentials are hard-coded -- replace with a real user store.
        if username == 'henry' and password == '123':
            session['username'] = 'henry'
            return redirect('/detail')
        return 'Failed'
|
flexible
|
{
"blob_id": "763e2db4eb9ad5953273fb310c8e9714964a39e6",
"index": 9576,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@log.route('/login', methods=['GET', 'POST'])\ndef login():\n print(request.path)\n if request.method == 'GET':\n return render_template('exec/login.html')\n else:\n username = request.form.get('username')\n password = request.form.get('password')\n if username == 'henry' and password == '123':\n session['username'] = 'henry'\n return redirect('/detail')\n return 'Failed'\n",
"step-3": "<mask token>\nlog = Blueprint('login', __name__)\n\n\n@log.route('/login', methods=['GET', 'POST'])\ndef login():\n print(request.path)\n if request.method == 'GET':\n return render_template('exec/login.html')\n else:\n username = request.form.get('username')\n password = request.form.get('password')\n if username == 'henry' and password == '123':\n session['username'] = 'henry'\n return redirect('/detail')\n return 'Failed'\n",
"step-4": "from flask import Blueprint, request, render_template, session, redirect\nlog = Blueprint('login', __name__)\n\n\n@log.route('/login', methods=['GET', 'POST'])\ndef login():\n print(request.path)\n if request.method == 'GET':\n return render_template('exec/login.html')\n else:\n username = request.form.get('username')\n password = request.form.get('password')\n if username == 'henry' and password == '123':\n session['username'] = 'henry'\n return redirect('/detail')\n return 'Failed'\n",
"step-5": "from flask import Blueprint, request, render_template, session, redirect\n\nlog = Blueprint('login', __name__, )\n\n\n@log.route('/login', methods=['GET', 'POST'])\ndef login():\n print(request.path, )\n if request.method == 'GET':\n return render_template('exec/login.html')\n else:\n username = request.form.get('username')\n password = request.form.get('password')\n if username == 'henry' and password == '123':\n session['username'] = 'henry'\n return redirect('/detail')\n return 'Failed'\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math
def math_builtins():
    """Exercise the numeric builtins: abs, divmod, max/min, pow, round, sum."""
    # abs works on ints, floats, and complex magnitudes
    assert abs(-123) == 123
    assert abs(-123.456) == 123.456
    assert abs(2+3j) == math.sqrt(2**2 + 3**2)

    # divmod returns (quotient, remainder)
    assert divmod(5, 2) == (2, 1)
    assert max(1, 2, 3, 4) == 4
    assert min(1, 2, 3, 4) == 1

    base, exponent, modulus = 2, 3, 7
    assert pow(base, exponent) == base ** exponent
    # three-argument pow is modular exponentiation
    assert pow(base, exponent, modulus) == base ** exponent % modulus

    # rounding of .05/.65 values follows their binary float representation
    assert round(123.05) == 123
    assert round(123.65) == 124
    assert round(-123.05) == -123
    assert round(-123.65) == -124
    assert round(123.65, 1) == 123.7
    assert round(-123.65, 1) == -123.7

    values = [1, 2, 3]
    assert sum(values) == 6
def math_module_constants():
    """Check the math-module constants and the NaN/infinity predicates."""
    assert math.pi == 3.141592653589793
    assert math.tau == 6.283185307179586
    assert math.e == 2.718281828459045

    # NaN never compares equal to itself, so isnan is the only reliable test
    assert math.isnan(float('NaN'))

    # infinity built from a string and from math.inf behaves identically
    assert math.isinf(float('inf'))
    assert math.isinf(math.inf)
    assert math.isinf(-math.inf)
def math_module():
    """math.fabs returns the absolute value as a float."""
    assert math.fabs(-1.23) == 1.23
if __name__ == "__main__":
math_builtins()
math_module_constants()
math_module()
|
normal
|
{
"blob_id": "c77db71844c65eb96946ac0cc384de43ad49ca99",
"index": 6007,
"step-1": "<mask token>\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2 + 3.0j) == math.sqrt(2 ** 2 + 3 ** 2)\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n assert round(123.05) == 123\n assert round(123.65) == 124\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n x = float('NaN')\n assert math.isnan(x)\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2 + 3.0j) == math.sqrt(2 ** 2 + 3 ** 2)\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n assert round(123.05) == 123\n assert round(123.65) == 124\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n x = float('NaN')\n assert math.isnan(x)\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\ndef math_module():\n x = -1.23\n assert math.fabs(x) == 1.23\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2 + 3.0j) == math.sqrt(2 ** 2 + 3 ** 2)\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n assert round(123.05) == 123\n assert round(123.65) == 124\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n x = float('NaN')\n assert math.isnan(x)\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\ndef math_module():\n x = -1.23\n assert math.fabs(x) == 1.23\n\n\nif __name__ == '__main__':\n math_builtins()\n math_module_constants()\n math_module()\n",
"step-4": "import math\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2 + 3.0j) == math.sqrt(2 ** 2 + 3 ** 2)\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n assert round(123.05) == 123\n assert round(123.65) == 124\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n x = float('NaN')\n assert math.isnan(x)\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\ndef math_module():\n x = -1.23\n assert math.fabs(x) == 1.23\n\n\nif __name__ == '__main__':\n math_builtins()\n math_module_constants()\n math_module()\n",
"step-5": "import math\n\n\ndef math_builtins():\n assert abs(-123) == 123\n assert abs(-123.456) == 123.456\n assert abs(2+3j) == math.sqrt(2**2 + 3**2)\n\n assert divmod(5, 2) == (2, 1)\n assert max(1, 2, 3, 4) == 4\n assert min(1, 2, 3, 4) == 1\n\n a = 2\n b = 3\n c = 7\n assert pow(a, b) == a ** b\n assert pow(a, b, c) == a ** b % c\n\n assert round(123.05) == 123\n assert round(123.65) == 124\n\n assert round(-123.05) == -123\n assert round(-123.65) == -124\n\n assert round(123.65, 1) == 123.7\n assert round(-123.65, 1) == -123.7\n\n lst = [1, 2, 3]\n assert sum(lst) == 6\n\n\ndef math_module_constants():\n assert math.pi == 3.141592653589793\n assert math.tau == 6.283185307179586\n assert math.e == 2.718281828459045\n\n x = float('NaN')\n assert math.isnan(x)\n\n x = float('inf')\n assert math.isinf(x)\n x = math.inf\n assert math.isinf(x)\n x = -math.inf\n assert math.isinf(x)\n\n\ndef math_module():\n x = -1.23\n assert math.fabs(x) == 1.23\n\n\nif __name__ == \"__main__\":\n math_builtins()\n math_module_constants()\n math_module()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.conf.urls import url
from django.contrib import admin
from comments.api.views import CommentListAPIView, CommentDetailAPIView
# URL routes for the comments API.
urlpatterns = [
    url(r'^$', CommentListAPIView.as_view(), name='list'),
    # Bug fix: class-based views must be adapted with .as_view() to become a
    # routable callable; the class itself was being passed before.
    url(r'^(?P<pk>\d+)/$', CommentDetailAPIView.as_view(), name='detail'),
]
|
normal
|
{
"blob_id": "e08820ff4fb35a3770fcb110ef7181aad1abbae5",
"index": 8778,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', CommentListAPIView.as_view(), name='list'), url(\n '^(?P<pk>\\\\d+)/$', CommentDetailAPIView, name='detail')]\n",
"step-3": "from django.conf.urls import url\nfrom django.contrib import admin\nfrom comments.api.views import CommentListAPIView, CommentDetailAPIView\nurlpatterns = [url('^$', CommentListAPIView.as_view(), name='list'), url(\n '^(?P<pk>\\\\d+)/$', CommentDetailAPIView, name='detail')]\n",
"step-4": "from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom comments.api.views import CommentListAPIView, CommentDetailAPIView\n\nurlpatterns = [\n url(r'^$', CommentListAPIView.as_view(), name='list'),\n url(r'^(?P<pk>\\d+)/$', CommentDetailAPIView, name='detail'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
no=int(input("enter no:"))
rev=0
while no!=0:
r=no%10
no=no//10
rev=rev*10+r
print("reverse no is:",rev)
|
normal
|
{
"blob_id": "b2371f9c774c605a52ff1a4fae2dd44a856076aa",
"index": 5522,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile no != 0:\n r = no % 10\n no = no // 10\n rev = rev * 10 + r\nprint('reverse no is:', rev)\n",
"step-3": "no = int(input('enter no:'))\nrev = 0\nwhile no != 0:\n r = no % 10\n no = no // 10\n rev = rev * 10 + r\nprint('reverse no is:', rev)\n",
"step-4": "no=int(input(\"enter no:\"))\nrev=0\nwhile no!=0:\n r=no%10\n no=no//10\n rev=rev*10+r\nprint(\"reverse no is:\",rev)\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from flask import Flask, render_template, flash, request
import pandas as pd
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
# Load the case-count dataset once at startup.
df = pd.read_csv('data1.csv')

# Smoke-test the district lookup. IndexError means the district is absent
# (empty .index), KeyError means the 'District' column is missing; the
# previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
try:
    row = df[df['District'] == 'Delhi'].index[0]
except (IndexError, KeyError):
    print("no city found")  # fixed typo: was "now city found"

DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)  # picks up DEBUG from this module
class ReusableForm(Form):
    """City-lookup form: one required text field plus a submit button.

    Fixes two defects in the original: the class header was missing its
    trailing colon (a SyntaxError), and it inherited from `FlaskForm`,
    which is never imported -- `Form` is what this file imports from wtforms.
    """
    name = StringField('name', validators=[validators.required()])
    submit = SubmitField('Enter')
@app.route("/", methods=['GET', 'POST'])
def hello():
form = ReusableForm( )
if form.is_submitted():
city = request.form['name'].capitalize()
try:
row = df[df['District'] == city].index[0]
print(city)
cases = df.at[row, 'count(district)']
print(cases)
except:
cases = -1
print("cases are", cases)
flash("cases are " + str(cases))
return render_template('data.html', form=form)
if __name__ == "__main__":
app.run()
|
normal
|
{
"blob_id": "8240e6483f47abbe12e7bef02493bd147ad3fec6",
"index": 6998,
"step-1": "from flask import Flask, render_template, flash, request\nimport pandas as pd\nfrom wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField\n\ndf = pd.read_csv('data1.csv')\ntry:\n row = df[df['District'] == 'Delhi'].index[0]\nexcept:\n print(\"now city found\")\n\nDEBUG = True\napp = Flask(__name__)\napp.config.from_object(__name__)\n\nclass ReusableForm(FlaskForm)\n name = StringField('name', validators=[validators.required()])\n submit = SubmitField('Enter')\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef hello():\n form = ReusableForm( )\n\n if form.is_submitted():\n city = request.form['name'].capitalize()\n try:\n row = df[df['District'] == city].index[0]\n print(city)\n cases = df.at[row, 'count(district)']\n print(cases)\n except:\n cases = -1\n print(\"cases are\", cases)\n flash(\"cases are \" + str(cases))\n\n\t return render_template('data.html', form=form)\n\nif __name__ == \"__main__\":\n app.run()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:
holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
holder.append(dest_var.assign(src_var.value()))
return holder
def replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list
) ->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(
next_states), axis=1) * ~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X, y)
def bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print('\n Total Score : {}'.format(reward_sum))
break
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops('main', 'target')
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1.0 / (episode / 10 + 1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done)
)
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,
step_count, loss), end='\r')
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward > 199:
print('\n game cleared, avg_reward : {}, episode : {}'.
format(avg_reward, episode + 1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1, 2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:
holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
holder.append(dest_var.assign(src_var.value()))
return holder
def replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list
) ->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(
next_states), axis=1) * ~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X, y)
def bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print('\n Total Score : {}'.format(reward_sum))
break
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops('main', 'target')
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1.0 / (episode / 10 + 1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done)
)
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,
step_count, loss), end='\r')
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward > 199:
print('\n game cleared, avg_reward : {}, episode : {}'.
format(avg_reward, episode + 1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1, 2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
env = gym.make('CartPole-v0')
INPUT_SIZE = env.observation_space.shape[0]
OUTPUT_SIZE = env.action_space.n
DISCOUNT_RATE = 0.9
REPLAY_MEMORY = 50000
BATCH_SIZE = 64
TARGET_UPDATE_FREQUENCY = 5
MAX_EPISODE = 1000
def get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:
holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
holder.append(dest_var.assign(src_var.value()))
return holder
def replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list
) ->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(
next_states), axis=1) * ~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X, y)
def bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print('\n Total Score : {}'.format(reward_sum))
break
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops('main', 'target')
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1.0 / (episode / 10 + 1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done)
)
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,
step_count, loss), end='\r')
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward > 199:
print('\n game cleared, avg_reward : {}, episode : {}'.
format(avg_reward, episode + 1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1, 2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import tensorflow as tf
from collections import deque
import random
import dqn
import gym
import matplotlib.pyplot as plt
env = gym.make('CartPole-v0')
INPUT_SIZE = env.observation_space.shape[0]
OUTPUT_SIZE = env.action_space.n
DISCOUNT_RATE = 0.9
REPLAY_MEMORY = 50000
BATCH_SIZE = 64
TARGET_UPDATE_FREQUENCY = 5
MAX_EPISODE = 1000
def get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:
holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
holder.append(dest_var.assign(src_var.value()))
return holder
def replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list
) ->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(
next_states), axis=1) * ~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X, y)
def bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print('\n Total Score : {}'.format(reward_sum))
break
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops('main', 'target')
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1.0 / (episode / 10 + 1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done)
)
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,
step_count, loss), end='\r')
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward > 199:
print('\n game cleared, avg_reward : {}, episode : {}'.
format(avg_reward, episode + 1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1, 2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
"""
openAI gym 'cart pole-v0'
"""
import numpy as np
import tensorflow as tf
from collections import deque
import random
import dqn
import gym
import matplotlib.pyplot as plt
# define environment
env = gym.make('CartPole-v0')
# define parameters
INPUT_SIZE = env.observation_space.shape[0]
OUTPUT_SIZE = env.action_space.n
# DISCOUNT_RATE : y = (1-dr)x + dr(r+f(x+1))
# REPLAY_MEMORY : memory size
# BATCH_SIZE : BATCH- training
# TARGET_UPDATE_FREQUENCY : targetW <- mainW each n
# MAX_EPISODE : n of trainning epoch
DISCOUNT_RATE = 0.9
REPLAY_MEMORY = 50000
BATCH_SIZE = 64
TARGET_UPDATE_FREQUENCY = 5
MAX_EPISODE = 1000
# copy targetW from mainW values
def get_copy_var_ops(src_scope_name:str, dest_scope_name:str)->list:
    """Build the TF assign ops that copy every trainable variable in the
    source scope onto its positional counterpart in the destination scope."""
    src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope=src_scope_name)
    dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope=dest_scope_name)
    # Pairing is positional: both scopes must define variables in the
    # same order for the copy to be meaningful.
    return [dest.assign(src.value()) for src, dest in zip(src_vars, dest_vars)]
def replay_train(mainDQN:dqn.DQN, targetDQN:dqn.DQN, train_batch:list)->float:
    """One minibatch update of mainDQN against targets from the frozen targetDQN.

    train_batch holds (state, action, reward, next_state, done) tuples.
    NOTE(review): annotated -> float, but main() unpacks two values from the
    result (loss, train-op output) -- the annotation looks inaccurate; confirm
    against dqn.DQN.update.
    """
    # Split the transition tuples into column arrays.
    states = np.vstack([x[0] for x in train_batch])
    actions = np.array([x[1] for x in train_batch])
    rewards = np.array([x[2] for x in train_batch])
    next_states = np.vstack([x[3] for x in train_batch])
    done = np.array([x[4] for x in train_batch])

    # Bellman target: r + gamma * max_a' Q_target(s', a'); `~done` zeroes the
    # bootstrap term on terminal transitions (done is a boolean array).
    Q_target = rewards + DISCOUNT_RATE*np.max(targetDQN.predict(next_states), axis=1)*~done

    X = states
    # Only the taken action's Q-value is replaced with the target; the rest
    # keep the network's own predictions so their gradient contribution is 0.
    y = mainDQN.predict(states)
    y[np.arange(len(states)), actions] = Q_target
    return mainDQN.update(X,y)
def bot_play(mainDQN:dqn.DQN, env:gym.Env)->None:
    """Run one greedy (argmax-Q) episode with rendering and print the score."""
    state = env.reset()
    total_reward = 0
    done = False
    while not done:
        env.render()
        greedy_action = np.argmax(mainDQN.predict(state))
        state, reward, done, _ = env.step(greedy_action)
        total_reward += reward
    print("\n Total Score : {}".format(total_reward))
def main():
    """Train the DQN on CartPole with a target network and experience replay.

    Stops early once the average steps over the last 100 episodes exceed 199,
    then plots per-episode step counts and losses.
    """
    replay_buffer = deque(maxlen=REPLAY_MEMORY)  # experience replay memory
    last_100 = deque(maxlen=100)                 # rolling window for the stop test
    step_list = []
    loss_list = []
    with tf.Session() as sess:
        mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name="main")
        targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name="target")
        sess.run(tf.global_variables_initializer())
        # Start the target network as an exact copy of the main network.
        copy_ops = get_copy_var_ops("main","target")
        sess.run(copy_ops)
        for episode in range(MAX_EPISODE):
            # Epsilon decays as 1 / (episode/10 + 1): full exploration first.
            e = 1./ ((episode/10)+1)
            done = False
            step_count = 0
            state = env.reset()
            loss = 0
            while not done:
                # Epsilon-greedy action selection.
                if np.random.rand() < e:
                    action = env.action_space.sample()
                else:
                    action = np.argmax(mainDQN.predict(state))
                next_states, reward, done, _ = env.step(action)
                if done:
                    reward = -1  # penalize episode termination (pole fell)
                replay_buffer.append((state, action, reward, next_states, done))
                # Train only once enough transitions have accumulated.
                if len(replay_buffer) > BATCH_SIZE:
                    minibatch = random.sample(replay_buffer, BATCH_SIZE)
                    loss, _ = replay_train(mainDQN, targetDQN, minibatch)
                # Sync target <- main every TARGET_UPDATE_FREQUENCY steps
                # (note: step_count == 0 also triggers a sync each episode).
                if step_count % TARGET_UPDATE_FREQUENCY == 0:
                    sess.run(copy_ops)
                state = next_states
                step_count += 1
            print(" EP : {} | steps : {} | EP loss : {}".format(episode+1, step_count, loss), end="\r")
            step_list.append(step_count)
            loss_list.append(loss)
            last_100.append(step_count)
            # Early stop once the 100-episode window is full and averages > 199.
            if len(last_100) == last_100.maxlen:
                avg_reward = np.mean(last_100)
                if avg_reward>199:
                    print("\n game cleared, avg_reward : {}, episode : {}".format(avg_reward, episode+1))
                    break
        # Plot step counts (left) and losses (right) per episode.
        step_array = np.asarray(step_list)
        loss_array = np.asarray(loss_list)
        _, plot = plt.subplots(1,2)
        plot[0].plot(step_array)
        plot[1].plot(loss_array)
        plt.show()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "9a40861239268aa62075b77b3ed452f31bb14fac",
"index": 2458,
"step-1": "<mask token>\n\n\ndef get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:\n holder = []\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n dest_scope_name)\n for src_var, dest_var in zip(src_vars, dest_vars):\n holder.append(dest_var.assign(src_var.value()))\n return holder\n\n\ndef replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list\n ) ->float:\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(\n next_states), axis=1) * ~done\n X = states\n y = mainDQN.predict(states)\n y[np.arange(len(states)), actions] = Q_target\n return mainDQN.update(X, y)\n\n\ndef bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:\n state = env.reset()\n reward_sum = 0\n while True:\n env.render()\n action = np.argmax(mainDQN.predict(state))\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n if done:\n print('\\n Total Score : {}'.format(reward_sum))\n break\n\n\ndef main():\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n last_100 = deque(maxlen=100)\n step_list = []\n loss_list = []\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')\n sess.run(tf.global_variables_initializer())\n copy_ops = get_copy_var_ops('main', 'target')\n sess.run(copy_ops)\n for episode in range(MAX_EPISODE):\n e = 1.0 / (episode / 10 + 1)\n done = False\n step_count = 0\n state = env.reset()\n loss = 0\n while not done:\n if np.random.rand() < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(mainDQN.predict(state))\n next_states, reward, done, _ = 
env.step(action)\n if done:\n reward = -1\n replay_buffer.append((state, action, reward, next_states, done)\n )\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n state = next_states\n step_count += 1\n print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,\n step_count, loss), end='\\r')\n step_list.append(step_count)\n loss_list.append(loss)\n last_100.append(step_count)\n if len(last_100) == last_100.maxlen:\n avg_reward = np.mean(last_100)\n if avg_reward > 199:\n print('\\n game cleared, avg_reward : {}, episode : {}'.\n format(avg_reward, episode + 1))\n break\n step_array = np.asarray(step_list)\n loss_array = np.asarray(loss_list)\n _, plot = plt.subplots(1, 2)\n plot[0].plot(step_array)\n plot[1].plot(loss_array)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:\n holder = []\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n dest_scope_name)\n for src_var, dest_var in zip(src_vars, dest_vars):\n holder.append(dest_var.assign(src_var.value()))\n return holder\n\n\ndef replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list\n ) ->float:\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(\n next_states), axis=1) * ~done\n X = states\n y = mainDQN.predict(states)\n y[np.arange(len(states)), actions] = Q_target\n return mainDQN.update(X, y)\n\n\ndef bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:\n state = env.reset()\n reward_sum = 0\n while True:\n env.render()\n action = np.argmax(mainDQN.predict(state))\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n if done:\n print('\\n Total Score : {}'.format(reward_sum))\n break\n\n\ndef main():\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n last_100 = deque(maxlen=100)\n step_list = []\n loss_list = []\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')\n sess.run(tf.global_variables_initializer())\n copy_ops = get_copy_var_ops('main', 'target')\n sess.run(copy_ops)\n for episode in range(MAX_EPISODE):\n e = 1.0 / (episode / 10 + 1)\n done = False\n step_count = 0\n state = env.reset()\n loss = 0\n while not done:\n if np.random.rand() < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(mainDQN.predict(state))\n next_states, reward, done, _ = 
env.step(action)\n if done:\n reward = -1\n replay_buffer.append((state, action, reward, next_states, done)\n )\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n state = next_states\n step_count += 1\n print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,\n step_count, loss), end='\\r')\n step_list.append(step_count)\n loss_list.append(loss)\n last_100.append(step_count)\n if len(last_100) == last_100.maxlen:\n avg_reward = np.mean(last_100)\n if avg_reward > 199:\n print('\\n game cleared, avg_reward : {}, episode : {}'.\n format(avg_reward, episode + 1))\n break\n step_array = np.asarray(step_list)\n loss_array = np.asarray(loss_list)\n _, plot = plt.subplots(1, 2)\n plot[0].plot(step_array)\n plot[1].plot(loss_array)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nenv = gym.make('CartPole-v0')\nINPUT_SIZE = env.observation_space.shape[0]\nOUTPUT_SIZE = env.action_space.n\nDISCOUNT_RATE = 0.9\nREPLAY_MEMORY = 50000\nBATCH_SIZE = 64\nTARGET_UPDATE_FREQUENCY = 5\nMAX_EPISODE = 1000\n\n\ndef get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:\n holder = []\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n dest_scope_name)\n for src_var, dest_var in zip(src_vars, dest_vars):\n holder.append(dest_var.assign(src_var.value()))\n return holder\n\n\ndef replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list\n ) ->float:\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(\n next_states), axis=1) * ~done\n X = states\n y = mainDQN.predict(states)\n y[np.arange(len(states)), actions] = Q_target\n return mainDQN.update(X, y)\n\n\ndef bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:\n state = env.reset()\n reward_sum = 0\n while True:\n env.render()\n action = np.argmax(mainDQN.predict(state))\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n if done:\n print('\\n Total Score : {}'.format(reward_sum))\n break\n\n\ndef main():\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n last_100 = deque(maxlen=100)\n step_list = []\n loss_list = []\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')\n sess.run(tf.global_variables_initializer())\n copy_ops = get_copy_var_ops('main', 'target')\n sess.run(copy_ops)\n for episode in range(MAX_EPISODE):\n e = 1.0 / (episode / 10 + 1)\n done = False\n 
step_count = 0\n state = env.reset()\n loss = 0\n while not done:\n if np.random.rand() < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(mainDQN.predict(state))\n next_states, reward, done, _ = env.step(action)\n if done:\n reward = -1\n replay_buffer.append((state, action, reward, next_states, done)\n )\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n state = next_states\n step_count += 1\n print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,\n step_count, loss), end='\\r')\n step_list.append(step_count)\n loss_list.append(loss)\n last_100.append(step_count)\n if len(last_100) == last_100.maxlen:\n avg_reward = np.mean(last_100)\n if avg_reward > 199:\n print('\\n game cleared, avg_reward : {}, episode : {}'.\n format(avg_reward, episode + 1))\n break\n step_array = np.asarray(step_list)\n loss_array = np.asarray(loss_list)\n _, plot = plt.subplots(1, 2)\n plot[0].plot(step_array)\n plot[1].plot(loss_array)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\nimport random\nimport dqn\nimport gym\nimport matplotlib.pyplot as plt\nenv = gym.make('CartPole-v0')\nINPUT_SIZE = env.observation_space.shape[0]\nOUTPUT_SIZE = env.action_space.n\nDISCOUNT_RATE = 0.9\nREPLAY_MEMORY = 50000\nBATCH_SIZE = 64\nTARGET_UPDATE_FREQUENCY = 5\nMAX_EPISODE = 1000\n\n\ndef get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:\n holder = []\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n dest_scope_name)\n for src_var, dest_var in zip(src_vars, dest_vars):\n holder.append(dest_var.assign(src_var.value()))\n return holder\n\n\ndef replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list\n ) ->float:\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(\n next_states), axis=1) * ~done\n X = states\n y = mainDQN.predict(states)\n y[np.arange(len(states)), actions] = Q_target\n return mainDQN.update(X, y)\n\n\ndef bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:\n state = env.reset()\n reward_sum = 0\n while True:\n env.render()\n action = np.argmax(mainDQN.predict(state))\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n if done:\n print('\\n Total Score : {}'.format(reward_sum))\n break\n\n\ndef main():\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n last_100 = deque(maxlen=100)\n step_list = []\n loss_list = []\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')\n sess.run(tf.global_variables_initializer())\n copy_ops 
= get_copy_var_ops('main', 'target')\n sess.run(copy_ops)\n for episode in range(MAX_EPISODE):\n e = 1.0 / (episode / 10 + 1)\n done = False\n step_count = 0\n state = env.reset()\n loss = 0\n while not done:\n if np.random.rand() < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(mainDQN.predict(state))\n next_states, reward, done, _ = env.step(action)\n if done:\n reward = -1\n replay_buffer.append((state, action, reward, next_states, done)\n )\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n state = next_states\n step_count += 1\n print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,\n step_count, loss), end='\\r')\n step_list.append(step_count)\n loss_list.append(loss)\n last_100.append(step_count)\n if len(last_100) == last_100.maxlen:\n avg_reward = np.mean(last_100)\n if avg_reward > 199:\n print('\\n game cleared, avg_reward : {}, episode : {}'.\n format(avg_reward, episode + 1))\n break\n step_array = np.asarray(step_list)\n loss_array = np.asarray(loss_list)\n _, plot = plt.subplots(1, 2)\n plot[0].plot(step_array)\n plot[1].plot(loss_array)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nopenAI gym 'cart pole-v0'\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\nimport random\nimport dqn\nimport gym\nimport matplotlib.pyplot as plt\n\n# define environment\nenv = gym.make('CartPole-v0')\n\n# define parameters\nINPUT_SIZE = env.observation_space.shape[0]\nOUTPUT_SIZE = env.action_space.n\n\n# DISCOUNT_RATE : y = (1-dr)x + dr(r+f(x+1))\n# REPLAY_MEMORY : memory size\n# BATCH_SIZE : BATCH- training\n# TARGET_UPDATE_FREQUENCY : targetW <- mainW each n\n# MAX_EPISODE : n of trainning epoch\nDISCOUNT_RATE = 0.9\nREPLAY_MEMORY = 50000\nBATCH_SIZE = 64\nTARGET_UPDATE_FREQUENCY = 5\nMAX_EPISODE = 1000\n\n# copy targetW from mainW values\ndef get_copy_var_ops(src_scope_name:str, dest_scope_name:str)->list:\n\tholder = []\n\tsrc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n\t\tscope = src_scope_name)\n\tdest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n\t\tscope = dest_scope_name)\n\tfor src_var, dest_var in zip(src_vars, dest_vars):\n\t\tholder.append(dest_var.assign(src_var.value()))\n\treturn holder\n\ndef replay_train(mainDQN:dqn.DQN, targetDQN:dqn.DQN, train_batch:list)->float:\n\tstates = np.vstack([x[0] for x in train_batch])\n\tactions = np.array([x[1] for x in train_batch])\n\trewards = np.array([x[2] for x in train_batch])\n\tnext_states = np.vstack([x[3] for x in train_batch])\n\tdone = np.array([x[4] for x in train_batch])\n\n\tQ_target = rewards + DISCOUNT_RATE*np.max(targetDQN.predict(next_states), axis=1)*~done\n\tX = states\n\ty = mainDQN.predict(states)\n\ty[np.arange(len(states)), actions] = Q_target\n\n\treturn mainDQN.update(X,y)\n\ndef bot_play(mainDQN:dqn.DQN, env:gym.Env)->None:\n\tstate = env.reset()\n\treward_sum = 0\n\n\twhile True:\n\t\tenv.render()\n\t\taction = np.argmax(mainDQN.predict(state))\n\t\tstate, reward, done, _ = env.step(action)\n\t\treward_sum += reward\n\n\t\tif done:\n\t\t\tprint(\"\\n Total Score : 
{}\".format(reward_sum))\n\t\t\tbreak\n\ndef main():\n\treplay_buffer = deque(maxlen=REPLAY_MEMORY)\n\tlast_100 = deque(maxlen=100)\n\tstep_list = []\n\tloss_list = []\n\n\twith tf.Session() as sess:\n\t\tmainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name=\"main\")\n\t\ttargetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name=\"target\")\n\t\tsess.run(tf.global_variables_initializer())\n\n\t\tcopy_ops = get_copy_var_ops(\"main\",\"target\")\n\t\tsess.run(copy_ops)\n\t\t\n\t\tfor episode in range(MAX_EPISODE):\n\t\t\te = 1./ ((episode/10)+1)\n\t\t\tdone = False\n\t\t\tstep_count = 0\n\t\t\tstate = env.reset()\n\t\t\tloss = 0\n\n\t\t\twhile not done:\n\t\t\t\tif np.random.rand() < e:\n\t\t\t\t\taction = env.action_space.sample()\n\t\t\t\telse:\n\t\t\t\t\taction = np.argmax(mainDQN.predict(state))\n\n\t\t\t\tnext_states, reward, done, _ = env.step(action)\n\n\t\t\t\tif done:\n\t\t\t\t\treward = -1\n\t\t\t\treplay_buffer.append((state, action, reward, next_states, done))\n\n\t\t\t\tif len(replay_buffer) > BATCH_SIZE:\n\t\t\t\t\tminibatch = random.sample(replay_buffer, BATCH_SIZE)\n\t\t\t\t\tloss, _ = replay_train(mainDQN, targetDQN, minibatch)\n\n\t\t\t\tif step_count % TARGET_UPDATE_FREQUENCY == 0:\n\t\t\t\t\tsess.run(copy_ops)\n\n\t\t\t\tstate = next_states\n\t\t\t\tstep_count += 1\n\n\t\t\tprint(\" EP : {} | steps : {} | EP loss : {}\".format(episode+1, step_count, loss), end=\"\\r\")\n\n\t\t\tstep_list.append(step_count)\n\t\t\tloss_list.append(loss)\n\t\t\tlast_100.append(step_count)\n\n\t\t\tif len(last_100) == last_100.maxlen:\n\t\t\t\tavg_reward = np.mean(last_100)\n\t\t\t\tif avg_reward>199:\n\t\t\t\t\tprint(\"\\n game cleared, avg_reward : {}, episode : {}\".format(avg_reward, episode+1))\n\t\t\t\t\tbreak\n\n\t\tstep_array = np.asarray(step_list)\n\t\tloss_array = np.asarray(loss_list)\n\t\t_, plot = plt.subplots(1,2)\n\t\tplot[0].plot(step_array)\n\t\tplot[1].plot(loss_array)\n\t\tplt.show()\n\nif __name__ == \"__main__\":\n\tmain()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('produksi', '0055_auto_20190409_1316')]
operations = [migrations.RemoveField(model_name='transisi', name=
'status_perpindahan')]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('produksi', '0055_auto_20190409_1316')]
operations = [migrations.RemoveField(model_name='transisi', name=
'status_perpindahan')]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-11 03:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('produksi', '0055_auto_20190409_1316'),
]
operations = [
migrations.RemoveField(
model_name='transisi',
name='status_perpindahan',
),
]
|
flexible
|
{
"blob_id": "1eb5df463bbd39002c5dbc3f88459e2f26d4b465",
"index": 8505,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('produksi', '0055_auto_20190409_1316')]\n operations = [migrations.RemoveField(model_name='transisi', name=\n 'status_perpindahan')]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('produksi', '0055_auto_20190409_1316')]\n operations = [migrations.RemoveField(model_name='transisi', name=\n 'status_perpindahan')]\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# Generated by Django 1.11.20 on 2019-04-11 03:58\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('produksi', '0055_auto_20190409_1316'),\r\n ]\r\n\r\n operations = [\r\n migrations.RemoveField(\r\n model_name='transisi',\r\n name='status_perpindahan',\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
AuthorPath = 'data/Author.csv'
PaperPath = 'buff/Paper.TitleCut.csv'
PaperAuthorPath = 'data/PaperAuthor.csv'
AffilListPath = 'buff/AffilList2.csv'
StopwordPath = 'InternalData/en.lst'
|
normal
|
{
"blob_id": "690e7cc9047b3a445bf330524df52e2b359f1f13",
"index": 958,
"step-1": "<mask token>\n",
"step-2": "AuthorPath = 'data/Author.csv'\nPaperPath = 'buff/Paper.TitleCut.csv'\nPaperAuthorPath = 'data/PaperAuthor.csv'\nAffilListPath = 'buff/AffilList2.csv'\nStopwordPath = 'InternalData/en.lst'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from rest_framework.urlpatterns import format_suffix_patterns
from django.urls import path
from external_api import views
urlpatterns = [path('darksky/', views.DarkSkyView.as_view())]
urlpatterns = format_suffix_patterns(urlpatterns)
|
normal
|
{
"blob_id": "e40f0a25d0c02f36c254e630133dc1fb11f29d4d",
"index": 8156,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('darksky/', views.DarkSkyView.as_view())]\nurlpatterns = format_suffix_patterns(urlpatterns)\n",
"step-3": "from rest_framework.urlpatterns import format_suffix_patterns\nfrom django.urls import path\nfrom external_api import views\nurlpatterns = [path('darksky/', views.DarkSkyView.as_view())]\nurlpatterns = format_suffix_patterns(urlpatterns)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def getLectures(name, url):
urlprefix = 'http://www.ocw.titech.ac.jp'
response = requests.get(url)
soup = BeautifulSoup(response.content, 'lxml')
table = soup.find('table', class_='ranking-list').tbody
for item in table.find_all('tr'):
code = item.find('td', class_='code').string
name = item.find('td', class_='course_title').a.string
lecture_url = urlprefix + item.find('td', class_='course_title').a[
'href']
teachers = [te.string for te in item.find('td', class_='lecturer').
find_all('a')]
quaterColumn = item.find('td', class_='opening_department')
quater = quaterColumn.a.string if quaterColumn != None else ''
if not name or not code:
continue
if code:
code = code.strip()
if name:
name = name.strip()
if quater:
quater = quater.strip()
print(name)
print(teachers)
print(lecture_url)
print(quater)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getGakuinList():
url = 'http://www.ocw.titech.ac.jp/'
response = requests.get(url)
soup = BeautifulSoup(response.content, 'lxml')
topMainNav = soup.find('ul', id='top-mein-navi')
gakubus = topMainNav.find_all(class_='gakubuBox')
gakuinList = []
for gakubu_div in gakubus:
gakuin = gakubu_div.find(class_='gakubuHead').span.string
if gakuin[-2:] != '学院':
continue
gakuin_url = url + gakubu_div.parent['href']
gakuinList.append({'name': gakuin, 'url': gakuin_url})
return gakuinList
<|reserved_special_token_0|>
def getLectures(name, url):
urlprefix = 'http://www.ocw.titech.ac.jp'
response = requests.get(url)
soup = BeautifulSoup(response.content, 'lxml')
table = soup.find('table', class_='ranking-list').tbody
for item in table.find_all('tr'):
code = item.find('td', class_='code').string
name = item.find('td', class_='course_title').a.string
lecture_url = urlprefix + item.find('td', class_='course_title').a[
'href']
teachers = [te.string for te in item.find('td', class_='lecturer').
find_all('a')]
quaterColumn = item.find('td', class_='opening_department')
quater = quaterColumn.a.string if quaterColumn != None else ''
if not name or not code:
continue
if code:
code = code.strip()
if name:
name = name.strip()
if quater:
quater = quater.strip()
print(name)
print(teachers)
print(lecture_url)
print(quater)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getGakuinList():
url = 'http://www.ocw.titech.ac.jp/'
response = requests.get(url)
soup = BeautifulSoup(response.content, 'lxml')
topMainNav = soup.find('ul', id='top-mein-navi')
gakubus = topMainNav.find_all(class_='gakubuBox')
gakuinList = []
for gakubu_div in gakubus:
gakuin = gakubu_div.find(class_='gakubuHead').span.string
if gakuin[-2:] != '学院':
continue
gakuin_url = url + gakubu_div.parent['href']
gakuinList.append({'name': gakuin, 'url': gakuin_url})
return gakuinList
<|reserved_special_token_0|>
def getLectures(name, url):
urlprefix = 'http://www.ocw.titech.ac.jp'
response = requests.get(url)
soup = BeautifulSoup(response.content, 'lxml')
table = soup.find('table', class_='ranking-list').tbody
for item in table.find_all('tr'):
code = item.find('td', class_='code').string
name = item.find('td', class_='course_title').a.string
lecture_url = urlprefix + item.find('td', class_='course_title').a[
'href']
teachers = [te.string for te in item.find('td', class_='lecturer').
find_all('a')]
quaterColumn = item.find('td', class_='opening_department')
quater = quaterColumn.a.string if quaterColumn != None else ''
if not name or not code:
continue
if code:
code = code.strip()
if name:
name = name.strip()
if quater:
quater = quater.strip()
print(name)
print(teachers)
print(lecture_url)
print(quater)
if __name__ == '__main__':
getLectures('情報理工学院',
'http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA'
)
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
<|reserved_special_token_0|>
def getGakuinList():
url = 'http://www.ocw.titech.ac.jp/'
response = requests.get(url)
soup = BeautifulSoup(response.content, 'lxml')
topMainNav = soup.find('ul', id='top-mein-navi')
gakubus = topMainNav.find_all(class_='gakubuBox')
gakuinList = []
for gakubu_div in gakubus:
gakuin = gakubu_div.find(class_='gakubuHead').span.string
if gakuin[-2:] != '学院':
continue
gakuin_url = url + gakubu_div.parent['href']
gakuinList.append({'name': gakuin, 'url': gakuin_url})
return gakuinList
<|reserved_special_token_0|>
def getLectures(name, url):
urlprefix = 'http://www.ocw.titech.ac.jp'
response = requests.get(url)
soup = BeautifulSoup(response.content, 'lxml')
table = soup.find('table', class_='ranking-list').tbody
for item in table.find_all('tr'):
code = item.find('td', class_='code').string
name = item.find('td', class_='course_title').a.string
lecture_url = urlprefix + item.find('td', class_='course_title').a[
'href']
teachers = [te.string for te in item.find('td', class_='lecturer').
find_all('a')]
quaterColumn = item.find('td', class_='opening_department')
quater = quaterColumn.a.string if quaterColumn != None else ''
if not name or not code:
continue
if code:
code = code.strip()
if name:
name = name.strip()
if quater:
quater = quater.strip()
print(name)
print(teachers)
print(lecture_url)
print(quater)
if __name__ == '__main__':
getLectures('情報理工学院',
'http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA'
)
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
'''
OCWから学院一覧を取得するスクリプト(6個くらいだから必要ない気もする)
gakuinListの各要素は次のような辞書に鳴っている
{
'name' : 学院名,
'url' : その学院の授業の一覧のurl,
}
'''
def getGakuinList():
url = "http://www.ocw.titech.ac.jp/"
response = requests.get(url)
soup = BeautifulSoup(response.content,"lxml")
topMainNav = soup.find("ul",id="top-mein-navi")
gakubus = topMainNav.find_all(class_="gakubuBox")
gakuinList = []
for gakubu_div in gakubus:
gakuin = gakubu_div.find(class_="gakubuHead").span.string
if gakuin[-2::] != "学院":
continue
gakuin_url = url + gakubu_div.parent['href']
gakuinList.append({'name':gakuin,'url':gakuin_url})
return gakuinList
'''
学院名とurlを渡されたらその学院の授業一覧を持ってくる
'''
def getLectures(name,url):
urlprefix = "http://www.ocw.titech.ac.jp"
response = requests.get(url)
soup = BeautifulSoup(response.content,'lxml')
table = soup.find('table',class_='ranking-list').tbody
for item in table.find_all('tr'):
code = item.find('td',class_='code').string
name = item.find('td',class_='course_title').a.string #講義名
lecture_url = urlprefix + item.find('td',class_='course_title').a['href']
teachers = [te.string for te in item.find('td',class_='lecturer').find_all('a')]
quaterColumn = item.find('td',class_='opening_department') #TODO ちゃんととれてない
quater = quaterColumn.a.string if quaterColumn != None else ''
if not name or not code: # 文字列が空の場合はスキップ
continue
if code:
code = code.strip()
if name:
name = name.strip()
if quater:
quater = quater.strip()
print(name)
print(teachers)
print(lecture_url)
print(quater)
if __name__=='__main__':
	# Ad-hoc manual run: scrape the School of Computing (情報理工学院) listing.
	getLectures('情報理工学院','http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA')
|
flexible
|
{
"blob_id": "24274dddbeb1be743cfcac331ee688d48c9a46dd",
"index": 8647,
"step-1": "<mask token>\n\n\ndef getLectures(name, url):\n urlprefix = 'http://www.ocw.titech.ac.jp'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n table = soup.find('table', class_='ranking-list').tbody\n for item in table.find_all('tr'):\n code = item.find('td', class_='code').string\n name = item.find('td', class_='course_title').a.string\n lecture_url = urlprefix + item.find('td', class_='course_title').a[\n 'href']\n teachers = [te.string for te in item.find('td', class_='lecturer').\n find_all('a')]\n quaterColumn = item.find('td', class_='opening_department')\n quater = quaterColumn.a.string if quaterColumn != None else ''\n if not name or not code:\n continue\n if code:\n code = code.strip()\n if name:\n name = name.strip()\n if quater:\n quater = quater.strip()\n print(name)\n print(teachers)\n print(lecture_url)\n print(quater)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getGakuinList():\n url = 'http://www.ocw.titech.ac.jp/'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n topMainNav = soup.find('ul', id='top-mein-navi')\n gakubus = topMainNav.find_all(class_='gakubuBox')\n gakuinList = []\n for gakubu_div in gakubus:\n gakuin = gakubu_div.find(class_='gakubuHead').span.string\n if gakuin[-2:] != '学院':\n continue\n gakuin_url = url + gakubu_div.parent['href']\n gakuinList.append({'name': gakuin, 'url': gakuin_url})\n return gakuinList\n\n\n<mask token>\n\n\ndef getLectures(name, url):\n urlprefix = 'http://www.ocw.titech.ac.jp'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n table = soup.find('table', class_='ranking-list').tbody\n for item in table.find_all('tr'):\n code = item.find('td', class_='code').string\n name = item.find('td', class_='course_title').a.string\n lecture_url = urlprefix + item.find('td', class_='course_title').a[\n 'href']\n teachers = [te.string for te in item.find('td', class_='lecturer').\n find_all('a')]\n quaterColumn = item.find('td', class_='opening_department')\n quater = quaterColumn.a.string if quaterColumn != None else ''\n if not name or not code:\n continue\n if code:\n code = code.strip()\n if name:\n name = name.strip()\n if quater:\n quater = quater.strip()\n print(name)\n print(teachers)\n print(lecture_url)\n print(quater)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getGakuinList():\n url = 'http://www.ocw.titech.ac.jp/'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n topMainNav = soup.find('ul', id='top-mein-navi')\n gakubus = topMainNav.find_all(class_='gakubuBox')\n gakuinList = []\n for gakubu_div in gakubus:\n gakuin = gakubu_div.find(class_='gakubuHead').span.string\n if gakuin[-2:] != '学院':\n continue\n gakuin_url = url + gakubu_div.parent['href']\n gakuinList.append({'name': gakuin, 'url': gakuin_url})\n return gakuinList\n\n\n<mask token>\n\n\ndef getLectures(name, url):\n urlprefix = 'http://www.ocw.titech.ac.jp'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n table = soup.find('table', class_='ranking-list').tbody\n for item in table.find_all('tr'):\n code = item.find('td', class_='code').string\n name = item.find('td', class_='course_title').a.string\n lecture_url = urlprefix + item.find('td', class_='course_title').a[\n 'href']\n teachers = [te.string for te in item.find('td', class_='lecturer').\n find_all('a')]\n quaterColumn = item.find('td', class_='opening_department')\n quater = quaterColumn.a.string if quaterColumn != None else ''\n if not name or not code:\n continue\n if code:\n code = code.strip()\n if name:\n name = name.strip()\n if quater:\n quater = quater.strip()\n print(name)\n print(teachers)\n print(lecture_url)\n print(quater)\n\n\nif __name__ == '__main__':\n getLectures('情報理工学院',\n 'http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA'\n )\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\n<mask token>\n\n\ndef getGakuinList():\n url = 'http://www.ocw.titech.ac.jp/'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n topMainNav = soup.find('ul', id='top-mein-navi')\n gakubus = topMainNav.find_all(class_='gakubuBox')\n gakuinList = []\n for gakubu_div in gakubus:\n gakuin = gakubu_div.find(class_='gakubuHead').span.string\n if gakuin[-2:] != '学院':\n continue\n gakuin_url = url + gakubu_div.parent['href']\n gakuinList.append({'name': gakuin, 'url': gakuin_url})\n return gakuinList\n\n\n<mask token>\n\n\ndef getLectures(name, url):\n urlprefix = 'http://www.ocw.titech.ac.jp'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n table = soup.find('table', class_='ranking-list').tbody\n for item in table.find_all('tr'):\n code = item.find('td', class_='code').string\n name = item.find('td', class_='course_title').a.string\n lecture_url = urlprefix + item.find('td', class_='course_title').a[\n 'href']\n teachers = [te.string for te in item.find('td', class_='lecturer').\n find_all('a')]\n quaterColumn = item.find('td', class_='opening_department')\n quater = quaterColumn.a.string if quaterColumn != None else ''\n if not name or not code:\n continue\n if code:\n code = code.strip()\n if name:\n name = name.strip()\n if quater:\n quater = quater.strip()\n print(name)\n print(teachers)\n print(lecture_url)\n print(quater)\n\n\nif __name__ == '__main__':\n getLectures('情報理工学院',\n 'http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA'\n )\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\n\n'''\nOCWから学院一覧を取得するスクリプト(6個くらいだから必要ない気もする)\ngakuinListの各要素は次のような辞書に鳴っている\n{\n\t'name' : 学院名,\n\t'url' : その学院の授業の一覧のurl,\n}\n'''\ndef getGakuinList():\n\turl = \"http://www.ocw.titech.ac.jp/\"\n\tresponse = requests.get(url)\n\tsoup = BeautifulSoup(response.content,\"lxml\")\n\n\ttopMainNav = soup.find(\"ul\",id=\"top-mein-navi\")\n\n\tgakubus = topMainNav.find_all(class_=\"gakubuBox\")\n\n\tgakuinList = []\n\tfor gakubu_div in gakubus:\n\t\tgakuin = gakubu_div.find(class_=\"gakubuHead\").span.string\n\t\tif gakuin[-2::] != \"学院\":\n\t\t\tcontinue\n\t\tgakuin_url = url + gakubu_div.parent['href']\n\t\tgakuinList.append({'name':gakuin,'url':gakuin_url})\n\n\treturn gakuinList\n\n'''\n学院名とurlを渡されたらその学院の授業一覧を持ってくる\n'''\ndef getLectures(name,url):\n\turlprefix = \"http://www.ocw.titech.ac.jp\"\n\tresponse = requests.get(url)\n\tsoup = BeautifulSoup(response.content,'lxml')\n\ttable = soup.find('table',class_='ranking-list').tbody\n\n\tfor item in table.find_all('tr'):\n\t\tcode = item.find('td',class_='code').string\n\t\tname = item.find('td',class_='course_title').a.string #講義名\n\t\tlecture_url = urlprefix + item.find('td',class_='course_title').a['href']\n\t\tteachers = [te.string for te in item.find('td',class_='lecturer').find_all('a')]\n\t\tquaterColumn = item.find('td',class_='opening_department') #TODO ちゃんととれてない\n\t\tquater = quaterColumn.a.string if quaterColumn != None else ''\n\t\tif not name or not code:\t# 文字列が空の場合はスキップ\n\t\t\tcontinue\n\t\tif code:\n\t\t\tcode = code.strip()\n\t\tif name:\n\t\t\tname = name.strip()\n\t\tif quater:\n\t\t\tquater = quater.strip()\n\t\tprint(name)\n\t\tprint(teachers)\n\t\tprint(lecture_url)\n\t\tprint(quater)\n\n\nif __name__=='__main__':\n\t#print(getGakuinList())\n\tgetLectures('情報理工学院','http://www.ocw.titech.ac.jp/index.php?module=General&action=T0100&GakubuCD=4&lang=JA')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# We will try to implement add noise to audio file and filter it using Mean and Median Filters.
import numpy as np
import scipy
import matplotlib.pyplot as plt
from scipy.io.wavfile import read
from scipy.io.wavfile import write
# Load the source recording and keep only the first channel — the [:, 0]
# index assumes the input file is multi-channel (TODO confirm it is stereo).
rate,audio_original = read('Audio_Original.wav')
audio = audio_original[:,0]
write("Audio_Modified.wav",rate,audio)
print (audio.shape[0]) # Number of samples
print (audio.shape[0]/rate) # Duration of track in seconds (samples / sample rate)
# print (audio.shape[1]) # No.of Channels
def Plot_Audio(audio):                                                  # Function to plot Audio Signal
	"""Plot the waveform of *audio* against sample index and show it."""
	sample_index = np.arange(audio.shape[0])
	plt.plot(sample_index, audio)
	plt.show()
def Add_Noise(audio, mu=0, sigma=1):                                    # Function to add Noise
	"""Return *audio* with additive Gaussian noise.

	audio: 1-D signal.
	mu, sigma: mean and standard deviation of the noise. Bug fix: the
	original implementation ignored these parameters and always sampled
	from N(0, 1).
	"""
	gaussian_noise = np.random.normal(mu, sigma, audio.shape[0])
	return audio + gaussian_noise
def Median_Filter(audio, M):                                            # Function to apply Median Filter to audio signal
	"""Median-filter *audio* with a zero-padded sliding window.

	audio: 1-D signal to filter.
	M: filter half-bandwidth; each output sample i is the median of the
	   window audio_change[i-M:i+M] — 2*M samples, right-endpoint
	   excluded, matching the original behaviour.

	Returns (filtered_signal, sample_indices).
	Clean-up: the original also computed unused locals p and q.
	"""
	s = audio.shape[0]
	# Zero-pad M samples on each side so the window is defined at the edges.
	audio_change = np.zeros(s + 2 * M)
	audio_change[M:s + M] = audio
	audio_new = np.zeros(s)
	for i in range(M, s + M):
		audio_new[i - M] = np.median(audio_change[i - M:i + M])
	return audio_new, np.arange(s)
def Mean_Filter(audio, M):                                              # Function to apply Mean Filter to audio signal
	"""Mean-filter *audio* with a zero-padded sliding window.

	audio: 1-D signal to filter.
	M: filter half-bandwidth; each output sample i is the mean of the
	   window audio_change[i-M:i+M] — 2*M samples, right-endpoint
	   excluded, matching the original behaviour.

	Returns (filtered_signal, sample_indices).
	Clean-up: the original also computed unused locals p and q.
	"""
	s = audio.shape[0]
	# Zero-pad M samples on each side so the window is defined at the edges.
	audio_change = np.zeros(s + 2 * M)
	audio_change[M:s + M] = audio
	audio_new = np.zeros(s)
	for i in range(M, s + M):
		audio_new[i - M] = np.mean(audio_change[i - M:i + M])
	return audio_new, np.arange(s)
# Demo pipeline: plot the original, add noise, then filter the noisy signal
# with both filters, writing each stage to a .wav file along the way.
Plot_Audio(audio)
audio = Add_Noise(audio)
Plot_Audio(audio)
write("Audio_with_Noise.wav",rate,audio) # Creating an audio signal with noise
audio_new_mean,time_new = Mean_Filter(audio,2)
Plot_Audio(audio_new_mean)
write("Audio_with_Noise_Filtered_Mean.wav",rate,audio_new_mean) # Filtered audio signal using Mean Filter
audio_new_median,time_new = Median_Filter(audio,2)
Plot_Audio(audio_new_median)
write("Audio_with_Noise_Filtered_Median.wav",rate,audio_new_median) # Filtered audio signal using Median Filter
|
normal
|
{
"blob_id": "844b8e2d4f05a51282b356c995f2733d6935a5d6",
"index": 5552,
"step-1": "<mask token>\n\n\ndef Plot_Audio(audio):\n s = audio.shape[0]\n time = np.arange(s)\n plt.plot(time, audio)\n plt.show()\n\n\ndef Add_Noise(audio, mu=0, sigma=1):\n \"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n gaussian_noise = np.random.normal(0, 1, audio.shape[0])\n audio = audio + gaussian_noise\n return audio\n\n\ndef Median_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.median(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\ndef Mean_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.mean(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\n<mask token>\n",
"step-2": "<mask token>\nwrite('Audio_Modified.wav', rate, audio)\nprint(audio.shape[0])\nprint(audio.shape[0] / rate)\n\n\ndef Plot_Audio(audio):\n s = audio.shape[0]\n time = np.arange(s)\n plt.plot(time, audio)\n plt.show()\n\n\ndef Add_Noise(audio, mu=0, sigma=1):\n \"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n gaussian_noise = np.random.normal(0, 1, audio.shape[0])\n audio = audio + gaussian_noise\n return audio\n\n\ndef Median_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.median(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\ndef Mean_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.mean(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\nPlot_Audio(audio)\n<mask token>\nPlot_Audio(audio)\nwrite('Audio_with_Noise.wav', rate, audio)\n<mask token>\nPlot_Audio(audio_new_mean)\nwrite('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)\n<mask token>\nPlot_Audio(audio_new_median)\nwrite('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)\n",
"step-3": "<mask token>\nrate, audio_original = read('Audio_Original.wav')\naudio = audio_original[:, 0]\nwrite('Audio_Modified.wav', rate, audio)\nprint(audio.shape[0])\nprint(audio.shape[0] / rate)\n\n\ndef Plot_Audio(audio):\n s = audio.shape[0]\n time = np.arange(s)\n plt.plot(time, audio)\n plt.show()\n\n\ndef Add_Noise(audio, mu=0, sigma=1):\n \"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n gaussian_noise = np.random.normal(0, 1, audio.shape[0])\n audio = audio + gaussian_noise\n return audio\n\n\ndef Median_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.median(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\ndef Mean_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.mean(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\nPlot_Audio(audio)\naudio = Add_Noise(audio)\nPlot_Audio(audio)\nwrite('Audio_with_Noise.wav', rate, audio)\naudio_new_mean, time_new = Mean_Filter(audio, 2)\nPlot_Audio(audio_new_mean)\nwrite('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)\naudio_new_median, time_new = Median_Filter(audio, 2)\nPlot_Audio(audio_new_median)\nwrite('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)\n",
"step-4": "import numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy.io.wavfile import read\nfrom scipy.io.wavfile import write\nrate, audio_original = read('Audio_Original.wav')\naudio = audio_original[:, 0]\nwrite('Audio_Modified.wav', rate, audio)\nprint(audio.shape[0])\nprint(audio.shape[0] / rate)\n\n\ndef Plot_Audio(audio):\n s = audio.shape[0]\n time = np.arange(s)\n plt.plot(time, audio)\n plt.show()\n\n\ndef Add_Noise(audio, mu=0, sigma=1):\n \"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n gaussian_noise = np.random.normal(0, 1, audio.shape[0])\n audio = audio + gaussian_noise\n return audio\n\n\ndef Median_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.median(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\ndef Mean_Filter(audio, M):\n \"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n p, q, s = M, audio.shape[0] - M, audio.shape[0]\n audio_change = np.zeros(s + 2 * M)\n audio_change[M:s + M] = audio\n audio_new = np.zeros(s)\n for i in range(M, s + M):\n audio_new[i - M] = np.mean(audio_change[i - M:i + M])\n time = np.arange(s)\n return audio_new, time\n\n\nPlot_Audio(audio)\naudio = Add_Noise(audio)\nPlot_Audio(audio)\nwrite('Audio_with_Noise.wav', rate, audio)\naudio_new_mean, time_new = Mean_Filter(audio, 2)\nPlot_Audio(audio_new_mean)\nwrite('Audio_with_Noise_Filtered_Mean.wav', rate, audio_new_mean)\naudio_new_median, time_new = Median_Filter(audio, 2)\nPlot_Audio(audio_new_median)\nwrite('Audio_with_Noise_Filtered_Median.wav', rate, audio_new_median)\n",
"step-5": "# We will try to implement add noise to audio file and filter it using Mean and Median Filters.\n\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy.io.wavfile import read\nfrom scipy.io.wavfile import write\n\n\nrate,audio_original = read('Audio_Original.wav')\naudio = audio_original[:,0]\nwrite(\"Audio_Modified.wav\",rate,audio)\nprint (audio.shape[0])\nprint (audio.shape[0]/rate)\t\t\t\t\t\t\t\t\t\t\t\t# Time of track\n# print (audio.shape[1])\t\t\t\t\t\t\t\t\t\t\t\t# No.of Channels\n\ndef Plot_Audio(audio):\t\t\t\t\t\t\t\t\t\t\t\t\t# Function to plot Audio Signal\n\ts = audio.shape[0]\n\ttime = np.arange(s)\n\tplt.plot(time,audio)\n\tplt.show()\n\ndef Add_Noise(audio,mu = 0,sigma = 1):\t\t\t\t\t\t\t\t\t# Function to add Noise\n\t\"\"\"\n\tAdding Gaussian Noise\n\t\"\"\"\n\tgaussian_noise = np.random.normal(0, 1, audio.shape[0])\n\taudio = audio + gaussian_noise\n\t\n\treturn audio\n\t\ndef Median_Filter(audio,M):\t\t\t\t\t\t\t\t\t\t\t\t# Function to apply Median Filter to audio signal\n\t\"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n\tp,q,s = M,audio.shape[0]- M,audio.shape[0]\n\taudio_change = np.zeros(s+2*M)\n\taudio_change[M:s+M] = audio\n\taudio_new = np.zeros(s)\n\t\t\n\tfor i in range(M,s+M):\n\t\taudio_new[i-M] = np.median(audio_change[i-M:i+M])\n\t\n\ttime = np.arange(s)\t\n\t\n\treturn audio_new,time\n\ndef Mean_Filter(audio,M):\t\t\t\t\t\t\t\t\t\t\t\t# Function to apply Mean Filter to audio signal\n\t\"\"\"\n\taudio = signal on which filter needs to be applied\n\tM = Bandwidth of filter\n\t\"\"\"\n\tp,q,s = M,audio.shape[0]- M,audio.shape[0]\n\taudio_change = np.zeros(s+2*M)\n\taudio_change[M:s+M] = audio\n\taudio_new = np.zeros(s)\n\t\t\n\tfor i in range(M,s+M):\n\t\taudio_new[i-M] = np.mean(audio_change[i-M:i+M])\n\t\n\ttime = np.arange(s)\t\n\t\n\treturn audio_new,time\n\nPlot_Audio(audio)\naudio = 
Add_Noise(audio)\nPlot_Audio(audio)\nwrite(\"Audio_with_Noise.wav\",rate,audio)\t\t\t\t\t\t\t\t# Creating a Audio signal with noise\n\n\naudio_new_mean,time_new = Mean_Filter(audio,2)\nPlot_Audio(audio_new_mean)\nwrite(\"Audio_with_Noise_Filtered_Mean.wav\",rate,audio_new_mean)\t\t\t# Creating filtered audio signal using Mean Filter\n\t\t\naudio_new_median,time_new = Median_Filter(audio,2)\nPlot_Audio(audio_new_median)\nwrite(\"Audio_with_Noise_Filtered_Median.wav\",rate,audio_new_median)\t\t# Creating filtered audio signal using Median Filter\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import numpy as np
class nearest(svm):
    """1-nearest-neighbour classifier exposing an svm-style interface."""
    name = "MLLKM2"

    def __init__(self):
        svm.__init__(self)

    def fit(self, x, y):
        # Lazy learner: just memorise the training data.
        self.x = x
        self.y = y

    def predict(self, x):
        # Return the label of the stored sample closest to x
        # (Euclidean distance, row-wise).
        distances = np.linalg.norm(np.subtract(x, self.x), axis=1)
        return self.y[np.argmin(distances)]
|
normal
|
{
"blob_id": "7d1ca15129b1bf6b713e1d5eda4436d4a8539ad1",
"index": 5939,
"step-1": "<mask token>\n\n\nclass nearest(svm):\n <mask token>\n\n def __init__(self):\n svm.__init__(self)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass nearest(svm):\n <mask token>\n\n def __init__(self):\n svm.__init__(self)\n\n def fit(self, x, y):\n self.x = x\n self.y = y\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass nearest(svm):\n name = 'MLLKM2'\n\n def __init__(self):\n svm.__init__(self)\n\n def fit(self, x, y):\n self.x = x\n self.y = y\n\n def predict(self, x):\n diff = np.subtract(x, self.x)\n distance = np.linalg.norm(diff, axis=1)\n dmin = np.argmin(distance)\n return self.y[dmin]\n",
"step-4": "import numpy as np\n\n\nclass nearest(svm):\n name = 'MLLKM2'\n\n def __init__(self):\n svm.__init__(self)\n\n def fit(self, x, y):\n self.x = x\n self.y = y\n\n def predict(self, x):\n diff = np.subtract(x, self.x)\n distance = np.linalg.norm(diff, axis=1)\n dmin = np.argmin(distance)\n return self.y[dmin]\n",
"step-5": "import numpy as np\n\nclass nearest(svm):\n name=\"MLLKM2\"\n def __init__(self):\n svm.__init__(self)\n\n def fit(self,x,y):\n self.x=x\n self.y=y\n \n def predict(self,x):\n diff=np.subtract(x,self.x)\n distance=np.linalg.norm(diff,axis=1)\n dmin= np.argmin( distance )\n return self.y[dmin]",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
import argparse # for handling command line arguments
import collections # for container types like OrderedDict
import configparser
import hashlib # for SHA-1
import os
import re
import sys
import zlib # git compresses everything using zlib
# Top-level CLI parser. As with git itself, every invocation is
# "<tool> <command> ..." — the subcommand is mandatory.
argparser = argparse.ArgumentParser(description="The stupid content tracker")
# dest="command" stores the chosen subcommand name as a string in
# args.command, which main() uses for dispatch.
argsubparsers = argparser.add_subparsers(title="Commands", dest="command")
argsubparsers.required = True
def main(args=sys.argv[1:]):
    """Parse the command line and dispatch to the matching cmd_* handler.

    args: argument list (defaults to sys.argv[1:], captured at import
    time; it is only read, never mutated).

    Bug fixes vs. the original: parse_args was called with the undefined
    name `argv`, and the ls-tree branch used the invalid identifier
    `cmd_ls-tree` (which parses as a subtraction).
    """
    args = argparser.parse_args(args)

    if args.command == "add" : cmd_add(args)
    elif args.command == "cat-file" : cmd_cat_file(args)
    elif args.command == "checkout" : cmd_checkout(args)
    elif args.command == "commit" : cmd_commit(args)
    elif args.command == "hash-object" : cmd_hash_object(args)
    elif args.command == "init" : cmd_init(args)
    elif args.command == "log" : cmd_log(args)
    elif args.command == "ls-tree" : cmd_ls_tree(args)
    elif args.command == "merge" : cmd_merge(args)
    elif args.command == "rebase" : cmd_rebase(args)
    elif args.command == "rev-parse" : cmd_rev_parse(args)
    elif args.command == "rm" : cmd_rm(args)
    elif args.command == "show-ref" : cmd_show_ref(args)
    elif args.command == "tag" : cmd_tag(args)
# abstraction for a git repository
class GitRepository(object):
    """A git repository.

    Wraps two paths: the *worktree* (the user's folder under version
    control) and the *gitdir* (the .git directory where git keeps its own
    data, including the config file at .git/config).
    """
    # Class-level defaults; __init__ fills these in per instance.
    worktree = None
    gitdir = None
    conf = None
    # `force` disables the sanity checks so that repo_create can build a
    # repository that does not exist on disk yet.
    def __init__(self, path, force=False):
        self.worktree = path
        self.gitdir = os.path.join(path, ".git")
        if not (force or os.path.isdir(self.gitdir)):
            raise Exception("Not a git repository %s" % path)
        # Read configuration file in .git/config
        self.conf = configparser.ConfigParser()
        cf = repo_file(self, "config")
        if cf and os.path.exists(cf):
            self.conf.read([cf])
        elif not force:
            raise Exception("Configuration file missing")
        if not force:
            # Only repository format version 0 is supported here.
            vers = int(self.conf.get("core", "repositoryformatversion"))
            if vers != 0:
                raise Exception("Unsupported repositoryformatversion %s " %vers)
# we will be doing a lot of path manipulations hence we will write some utility functions
def repo_path(repo, *path):
    """Join the given *path components under the repository's gitdir."""
    return os.path.join(repo.gitdir, *path)
def repo_file(repo, *path, mkdir=False):
    """Like repo_path, but first ensure dirname(*path) exists.

    For example repo_file(r, "refs", "remotes", "origin") will create
    .git/refs/remotes (when mkdir is True) and return the full path to
    "origin". Returns None when the parent directory is absent and
    mkdir is False.
    """
    if not repo_dir(repo, *path[:-1], mkdir=mkdir):
        return None
    return repo_path(repo, *path)
def repo_dir(repo, *path, mkdir=False):
    """Like repo_path, but mkdir *path if absent (when mkdir is True).

    Returns the directory path, or None when it does not exist and
    mkdir is False. Raises if the path exists but is not a directory.
    """
    full = repo_path(repo, *path)
    if os.path.exists(full):
        if not os.path.isdir(full):
            raise Exception("Not a directory %s" % full)
        return full
    if not mkdir:
        return None
    os.makedirs(full)
    return full
# to create a new git repo, we create the following paths
# .git is the git repository
# .git/objects: the object store
# .git/refs: the reference store, it contains 2 subdirs heads and tags
# .git/HEAD: a reference to the current head
# .git/config: repository's configuration file
# .git/description: repository's description file
def repo_create(path):
    """Create a new repository at path.

    The worktree must either not exist yet, or be an empty directory.
    Builds the .git skeleton (branches/, objects/, refs/tags, refs/heads)
    plus the description, HEAD and config files, and returns the new
    GitRepository.

    Bug fix: the four assert lines below were each missing their closing
    parenthesis in the original, which made the file a SyntaxError.
    """
    repo = GitRepository(path, True)

    if os.path.exists(repo.worktree):
        if not os.path.isdir(repo.worktree):
            raise Exception("%s is not a directory!" % path)
        if os.listdir(repo.worktree):
            raise Exception("%s is not empty!" % path)
    else:
        os.makedirs(repo.worktree)

    assert repo_dir(repo, "branches", mkdir=True)
    assert repo_dir(repo, "objects", mkdir=True)
    assert repo_dir(repo, "refs", "tags", mkdir=True)
    assert repo_dir(repo, "refs", "heads", mkdir=True)

    # .git/description
    with open(repo_file(repo, "description"), "w") as f:
        f.write("Unnamed repository: edit this file 'description' to name the repository.\n")

    # .git/HEAD
    with open(repo_file(repo, "HEAD"), "w") as f:
        f.write("ref: refs/heads/master\n")

    with open(repo_file(repo, "config"), "w") as f:
        config = repo_default_config()
        config.write(f)

    return repo
|
normal
|
{
"blob_id": "1c8145007edb09d77a3b15de5c34d0bc86c0ba97",
"index": 8343,
"step-1": "import argparse # for handling command line arguments\nimport collections # for container types like OrderedDict\nimport configparser\nimport hashlib # for SHA-1\nimport os\nimport re\nimport sys\nimport zlib # git compresses everything using zlib\n\nargparser = argparse.ArgumentParser(description=\"The stupid content tracker\")\n\n# we don't just call git, we always call git command (init, add, clone)\n# hence we need to add subparsers to our arg parser\n\n# dest=command means the command we pass will be stored as a string\n# in an attribute called command\nargsubparsers = argparser.add_subparsers(title=\"Commands\", dest=\"command\")\nargsubparsers.required = True\n\ndef main(args = sys.argv[1:]):\n args = argparser.parse_args(argv)\n\n if args.command == \"add\" : cmd_add(args)\n elif args.command == \"cat-file\" : cmd_cat_file(args)\n elif args.command == \"checkout\" : cmd_checkout(args)\n elif args.command == \"commit\" : cmd_commit(args)\n elif args.command == \"hash-object\" : cmd_hash_object(args)\n elif args.command == \"init\" : cmd_init(args)\n elif args.command == \"log\" : cmd_log(args)\n elif args.command == \"ls-tree\" : cmd_ls-tree(args)\n elif args.command == \"merge\" : cmd_merge(args)\n elif args.command == \"rebase\" : cmd_rebase(args)\n elif args.command == \"rev-parse\" : cmd_rev_parse(args)\n elif args.command == \"rm\" : cmd_rm(args)\n elif args.command == \"show-ref\" : cmd_show_ref(args)\n elif args.command == \"tag\" : cmd_tag(args)\n\n# abstraction for a git repository\nclass GitRepository(object):\n \"\"\"A git repository\"\"\"\n # a git repo contains 2 things, worktree which is the folder we want to apply version control on\n # and a .git repo where git stores its own things\n # the config file is stored in .git/config\n\n worktree = None\n gitdir = None\n conf = None\n \n # an additional force parameter to disable checks\n def __init__(self, path, force=False):\n self.worktree = path\n self.gitdir = os.path.join(path, 
\".git\")\n \n if not (force or os.path.isdir(self.gitdir)):\n raise Exception(\"Not a git repository %s\" % path)\n \n # Read configuration file in .git/config\n self.conf = configparser.ConfigParser()\n cf = repo_file(self, \"config\")\n\n if cf and os.path.exists(cf):\n self.conf.read([cf])\n elif not force:\n raise Exception(\"Configuration file missing\")\n \n if not force:\n vers = int(self.conf.get(\"core\", \"repositoryformatversion\"))\n if vers != 0:\n raise Exception(\"Unsupported repositoryformatversion %s \" %vers)\n\n\n\n# we will be doing a lot of path manipulations hence we will write some utility functions\ndef repo_path(repo, *path):\n \"\"\"Compute path under repo's gitdir\"\"\"\n return os.path.join(repo.gitdir, *path)\n\n\ndef repo_file(repo, *path, mkdir=False):\n \"\"\"Same as repo_path, but creates dirname(*path) if absent. For example repo_file(r, \"refs\", \"remotes\", \"origin\")\n will create .git/refs/remotes.\"\"\"\n\n if repo_dir(repo, *path[:-1], mkdir=mkdir):\n return repo_path(repo, *path)\n\ndef repo_dir(repo, *path, mkdir=False):\n \"\"\"Same as repo_path, but mkdir *path if absent if mkdir\"\"\"\n\n path = repo_path(repo, *path)\n\n if os.path.exists(path):\n if (os.path.isdir(path)):\n return path\n else:\n raise Exception(\"Not a directory %s\" % path)\n\n if mkdir:\n os.makedirs(path)\n return path\n else:\n return None\n\n# to create a new git repo, we create the following paths\n# .git is the git repository\n# .git/objects: the object store\n# .git/refs: the reference store, it contains 2 subdirs heads and tags\n# .git/HEAD: a reference to the current head\n# .git/config: repository's configuration file\n# .git/description: repository's description file\n\ndef repo_create(path):\n \"\"\"Create a new repository at path.\"\"\"\n \n repo = GitRepository(path, True)\n \n if os.path.exists(repo.worktree):\n if not os.path.isdir(repo.worktree):\n raise Exception(\"%s is not a directory!\" % path)\n if os.listdir(repo.worktree):\n 
raise Exception(\"%s is not empty!\" % path)\n else:\n os.makedirs(repo.worktree)\n\n assert(repo_dir(repo, \"branches\", mkdir=True) \n assert(repo_dir(repo, \"objects\", mkdir=True) \n assert(repo_dir(repo, \"refs\", \"tags\", mkdir=True) \n assert(repo_dir(repo, \"refs\", \"heads\", mkdir=True) \n\n # .git/description\n with open(repo_file(repo, \"description\"), \"w\") as f:\n f.write(\"Unnamed repository: edit this file 'description' to name the repository.\\n\")\n\n # .git/HEAD\n with open(repo_file(repo, \"HEAD\"), \"w\") as f:\n f.write(\"ref: refs/heads/master\\n\")\n\n with open(repo_file(repo, \"config\"), \"w\") as f:\n config = repo_default_config()\n config.write(f)\n\n\n return repo\n \n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScooterHutAUSpider(StockInStoreSpider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScooterHutAUSpider(StockInStoreSpider):
name = 'scooter_hut_au'
item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}
api_site_id = '10112'
api_widget_id = '119'
api_widget_type = 'product'
api_origin = 'https://scooterhut.com.au'
<|reserved_special_token_1|>
from locations.storefinders.stockinstore import StockInStoreSpider
class ScooterHutAUSpider(StockInStoreSpider):
name = 'scooter_hut_au'
item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}
api_site_id = '10112'
api_widget_id = '119'
api_widget_type = 'product'
api_origin = 'https://scooterhut.com.au'
<|reserved_special_token_1|>
from locations.storefinders.stockinstore import StockInStoreSpider
class ScooterHutAUSpider(StockInStoreSpider):
    """Store locations for Scooter Hut (Australia) via the StockInStore storefinder."""
    name = "scooter_hut_au"
    item_attributes = {"brand": "Scooter Hut", "brand_wikidata": "Q117747623"}
    # StockInStore widget parameters — presumably issued per retailer;
    # verify against the embed code on scooterhut.com.au if they stop working.
    api_site_id = "10112"
    api_widget_id = "119"
    api_widget_type = "product"
    api_origin = "https://scooterhut.com.au"
|
flexible
|
{
"blob_id": "e37f4422c1063df50453f7abf72a0a9a31156d8b",
"index": 899,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = 'scooter_hut_au'\n item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}\n api_site_id = '10112'\n api_widget_id = '119'\n api_widget_type = 'product'\n api_origin = 'https://scooterhut.com.au'\n",
"step-4": "from locations.storefinders.stockinstore import StockInStoreSpider\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = 'scooter_hut_au'\n item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}\n api_site_id = '10112'\n api_widget_id = '119'\n api_widget_type = 'product'\n api_origin = 'https://scooterhut.com.au'\n",
"step-5": "from locations.storefinders.stockinstore import StockInStoreSpider\n\n\nclass ScooterHutAUSpider(StockInStoreSpider):\n name = \"scooter_hut_au\"\n item_attributes = {\"brand\": \"Scooter Hut\", \"brand_wikidata\": \"Q117747623\"}\n api_site_id = \"10112\"\n api_widget_id = \"119\"\n api_widget_type = \"product\"\n api_origin = \"https://scooterhut.com.au\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ReplaceCornersAtCertainAngles(object):
def __init__(self):
windowWidth = 250
windowHeight = 140
windowWidthResize = 100
windowHeightResize = 0
self.w = vanilla.FloatingWindow((windowWidth, windowHeight),
'Replace Corners At Certain Angles', minSize=(windowWidth,
windowHeight), maxSize=(windowWidth + windowWidthResize,
windowHeight + windowHeightResize), autosaveName=
'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')
self.cornerList = self.getAllCorners()
self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',
sizeStyle='small')
self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),
self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',
sizeStyle='small')
self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15,
17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),
'at angles', sizeStyle='small')
self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),
('larger', 'smaller'), sizeStyle='small', callback=self.
SavePreferences)
self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',
sizeStyle='small')
self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),
'90', sizeStyle='small')
self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),
'Replace', sizeStyle='regular', callback=self.
ReplaceCornersAtCertainAnglesMain)
self.w.setDefaultButton(self.w.runButton)
if not self.LoadPreferences():
print(
"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults"
)
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences(self, sender):
try:
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'
] = self.w.largerOrSmaller.get()
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'
] = self.w.thresholdAngle.get()
except:
return False
return True
def LoadPreferences(self):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_({
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':
'0',
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':
'90'})
self.w.largerOrSmaller.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])
self.w.thresholdAngle.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])
except:
return False
return True
def CheckButton(self, sender):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith(
'_corner.')]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)
vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)
angle1 = math.degrees(math.atan2(vector1.y, vector1.x))
angle2 = math.degrees(math.atan2(vector2.y, vector2.x))
angleBetweenVectors = (angle1 - angle2) % 360.0
return angleBetweenVectors
def ReplaceCornersAtCertainAnglesMain(self, sender):
try:
fromSelection = self.w.searchForCorner.get()
fromCornerName = self.cornerList[fromSelection]
toSelection = self.w.replaceWithCorner.get()
toCornerName = self.cornerList[toSelection]
smallerThan = bool(self.w.largerOrSmaller.get())
thresholdAngle = float(self.w.thresholdAngle.get())
thisFont = Glyphs.font
masterIDs = [m.id for m in thisFont.masters]
selectedGlyphs = [l.parent for l in thisFont.selectedLayers]
for thisGlyph in selectedGlyphs:
for masterID in masterIDs:
masterLayer = thisGlyph.layers[masterID]
print("Processing %s, layer '%s'" % (thisGlyph.name,
masterLayer.name))
if masterLayer.hints:
for thisHint in masterLayer.hints:
if (thisHint.type == CORNER and thisHint.name ==
fromCornerName):
node = thisHint.originNode
angle = self.angleBetweenVectors(node.
prevNode, node, node.nextNode)
if (smallerThan and angle < thresholdAngle or
not smallerThan and angle > thresholdAngle
):
thisHint.name = toCornerName
print(
'- replaced hint at %i, %i (angle: %.1f)'
% (node.x, node.y, angle))
else:
print(angle)
if not self.SavePreferences(self):
print(
"Note: 'Replace Corners At Certain Angles' could not write preferences."
)
except Exception as e:
Glyphs.showMacroWindow()
print('Replace Corners At Certain Angles Error: %s' % e)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ReplaceCornersAtCertainAngles(object):
def __init__(self):
windowWidth = 250
windowHeight = 140
windowWidthResize = 100
windowHeightResize = 0
self.w = vanilla.FloatingWindow((windowWidth, windowHeight),
'Replace Corners At Certain Angles', minSize=(windowWidth,
windowHeight), maxSize=(windowWidth + windowWidthResize,
windowHeight + windowHeightResize), autosaveName=
'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')
self.cornerList = self.getAllCorners()
self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',
sizeStyle='small')
self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),
self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',
sizeStyle='small')
self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15,
17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),
'at angles', sizeStyle='small')
self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),
('larger', 'smaller'), sizeStyle='small', callback=self.
SavePreferences)
self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',
sizeStyle='small')
self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),
'90', sizeStyle='small')
self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),
'Replace', sizeStyle='regular', callback=self.
ReplaceCornersAtCertainAnglesMain)
self.w.setDefaultButton(self.w.runButton)
if not self.LoadPreferences():
print(
"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults"
)
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences(self, sender):
try:
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'
] = self.w.largerOrSmaller.get()
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'
] = self.w.thresholdAngle.get()
except:
return False
return True
def LoadPreferences(self):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_({
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':
'0',
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':
'90'})
self.w.largerOrSmaller.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])
self.w.thresholdAngle.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])
except:
return False
return True
def CheckButton(self, sender):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith(
'_corner.')]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)
vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)
angle1 = math.degrees(math.atan2(vector1.y, vector1.x))
angle2 = math.degrees(math.atan2(vector2.y, vector2.x))
angleBetweenVectors = (angle1 - angle2) % 360.0
return angleBetweenVectors
def ReplaceCornersAtCertainAnglesMain(self, sender):
try:
fromSelection = self.w.searchForCorner.get()
fromCornerName = self.cornerList[fromSelection]
toSelection = self.w.replaceWithCorner.get()
toCornerName = self.cornerList[toSelection]
smallerThan = bool(self.w.largerOrSmaller.get())
thresholdAngle = float(self.w.thresholdAngle.get())
thisFont = Glyphs.font
masterIDs = [m.id for m in thisFont.masters]
selectedGlyphs = [l.parent for l in thisFont.selectedLayers]
for thisGlyph in selectedGlyphs:
for masterID in masterIDs:
masterLayer = thisGlyph.layers[masterID]
print("Processing %s, layer '%s'" % (thisGlyph.name,
masterLayer.name))
if masterLayer.hints:
for thisHint in masterLayer.hints:
if (thisHint.type == CORNER and thisHint.name ==
fromCornerName):
node = thisHint.originNode
angle = self.angleBetweenVectors(node.
prevNode, node, node.nextNode)
if (smallerThan and angle < thresholdAngle or
not smallerThan and angle > thresholdAngle
):
thisHint.name = toCornerName
print(
'- replaced hint at %i, %i (angle: %.1f)'
% (node.x, node.y, angle))
else:
print(angle)
if not self.SavePreferences(self):
print(
"Note: 'Replace Corners At Certain Angles' could not write preferences."
)
except Exception as e:
Glyphs.showMacroWindow()
print('Replace Corners At Certain Angles Error: %s' % e)
ReplaceCornersAtCertainAngles()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__doc__ = """
Replace Corner Components at blunt or acute angles.
"""
<|reserved_special_token_0|>
class ReplaceCornersAtCertainAngles(object):
def __init__(self):
windowWidth = 250
windowHeight = 140
windowWidthResize = 100
windowHeightResize = 0
self.w = vanilla.FloatingWindow((windowWidth, windowHeight),
'Replace Corners At Certain Angles', minSize=(windowWidth,
windowHeight), maxSize=(windowWidth + windowWidthResize,
windowHeight + windowHeightResize), autosaveName=
'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')
self.cornerList = self.getAllCorners()
self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',
sizeStyle='small')
self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),
self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',
sizeStyle='small')
self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15,
17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),
'at angles', sizeStyle='small')
self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),
('larger', 'smaller'), sizeStyle='small', callback=self.
SavePreferences)
self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',
sizeStyle='small')
self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),
'90', sizeStyle='small')
self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),
'Replace', sizeStyle='regular', callback=self.
ReplaceCornersAtCertainAnglesMain)
self.w.setDefaultButton(self.w.runButton)
if not self.LoadPreferences():
print(
"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults"
)
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences(self, sender):
try:
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'
] = self.w.largerOrSmaller.get()
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'
] = self.w.thresholdAngle.get()
except:
return False
return True
def LoadPreferences(self):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_({
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':
'0',
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':
'90'})
self.w.largerOrSmaller.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])
self.w.thresholdAngle.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])
except:
return False
return True
def CheckButton(self, sender):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith(
'_corner.')]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)
vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)
angle1 = math.degrees(math.atan2(vector1.y, vector1.x))
angle2 = math.degrees(math.atan2(vector2.y, vector2.x))
angleBetweenVectors = (angle1 - angle2) % 360.0
return angleBetweenVectors
def ReplaceCornersAtCertainAnglesMain(self, sender):
try:
fromSelection = self.w.searchForCorner.get()
fromCornerName = self.cornerList[fromSelection]
toSelection = self.w.replaceWithCorner.get()
toCornerName = self.cornerList[toSelection]
smallerThan = bool(self.w.largerOrSmaller.get())
thresholdAngle = float(self.w.thresholdAngle.get())
thisFont = Glyphs.font
masterIDs = [m.id for m in thisFont.masters]
selectedGlyphs = [l.parent for l in thisFont.selectedLayers]
for thisGlyph in selectedGlyphs:
for masterID in masterIDs:
masterLayer = thisGlyph.layers[masterID]
print("Processing %s, layer '%s'" % (thisGlyph.name,
masterLayer.name))
if masterLayer.hints:
for thisHint in masterLayer.hints:
if (thisHint.type == CORNER and thisHint.name ==
fromCornerName):
node = thisHint.originNode
angle = self.angleBetweenVectors(node.
prevNode, node, node.nextNode)
if (smallerThan and angle < thresholdAngle or
not smallerThan and angle > thresholdAngle
):
thisHint.name = toCornerName
print(
'- replaced hint at %i, %i (angle: %.1f)'
% (node.x, node.y, angle))
else:
print(angle)
if not self.SavePreferences(self):
print(
"Note: 'Replace Corners At Certain Angles' could not write preferences."
)
except Exception as e:
Glyphs.showMacroWindow()
print('Replace Corners At Certain Angles Error: %s' % e)
ReplaceCornersAtCertainAngles()
<|reserved_special_token_1|>
from __future__ import division, print_function, unicode_literals
__doc__ = """
Replace Corner Components at blunt or acute angles.
"""
import vanilla, math
from Foundation import NSPoint
class ReplaceCornersAtCertainAngles(object):
def __init__(self):
windowWidth = 250
windowHeight = 140
windowWidthResize = 100
windowHeightResize = 0
self.w = vanilla.FloatingWindow((windowWidth, windowHeight),
'Replace Corners At Certain Angles', minSize=(windowWidth,
windowHeight), maxSize=(windowWidth + windowWidthResize,
windowHeight + windowHeightResize), autosaveName=
'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')
self.cornerList = self.getAllCorners()
self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',
sizeStyle='small')
self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),
self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',
sizeStyle='small')
self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15,
17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),
'at angles', sizeStyle='small')
self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),
('larger', 'smaller'), sizeStyle='small', callback=self.
SavePreferences)
self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',
sizeStyle='small')
self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),
'90', sizeStyle='small')
self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),
'Replace', sizeStyle='regular', callback=self.
ReplaceCornersAtCertainAnglesMain)
self.w.setDefaultButton(self.w.runButton)
if not self.LoadPreferences():
print(
"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults"
)
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences(self, sender):
try:
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'
] = self.w.largerOrSmaller.get()
Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'
] = self.w.thresholdAngle.get()
except:
return False
return True
def LoadPreferences(self):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_({
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':
'0',
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':
'90'})
self.w.largerOrSmaller.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])
self.w.thresholdAngle.set(Glyphs.defaults[
'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])
except:
return False
return True
def CheckButton(self, sender):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith(
'_corner.')]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)
vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)
angle1 = math.degrees(math.atan2(vector1.y, vector1.x))
angle2 = math.degrees(math.atan2(vector2.y, vector2.x))
angleBetweenVectors = (angle1 - angle2) % 360.0
return angleBetweenVectors
def ReplaceCornersAtCertainAnglesMain(self, sender):
try:
fromSelection = self.w.searchForCorner.get()
fromCornerName = self.cornerList[fromSelection]
toSelection = self.w.replaceWithCorner.get()
toCornerName = self.cornerList[toSelection]
smallerThan = bool(self.w.largerOrSmaller.get())
thresholdAngle = float(self.w.thresholdAngle.get())
thisFont = Glyphs.font
masterIDs = [m.id for m in thisFont.masters]
selectedGlyphs = [l.parent for l in thisFont.selectedLayers]
for thisGlyph in selectedGlyphs:
for masterID in masterIDs:
masterLayer = thisGlyph.layers[masterID]
print("Processing %s, layer '%s'" % (thisGlyph.name,
masterLayer.name))
if masterLayer.hints:
for thisHint in masterLayer.hints:
if (thisHint.type == CORNER and thisHint.name ==
fromCornerName):
node = thisHint.originNode
angle = self.angleBetweenVectors(node.
prevNode, node, node.nextNode)
if (smallerThan and angle < thresholdAngle or
not smallerThan and angle > thresholdAngle
):
thisHint.name = toCornerName
print(
'- replaced hint at %i, %i (angle: %.1f)'
% (node.x, node.y, angle))
else:
print(angle)
if not self.SavePreferences(self):
print(
"Note: 'Replace Corners At Certain Angles' could not write preferences."
)
except Exception as e:
Glyphs.showMacroWindow()
print('Replace Corners At Certain Angles Error: %s' % e)
ReplaceCornersAtCertainAngles()
<|reserved_special_token_1|>
#MenuTitle: Find and Replace Corner Components at Certain Angles
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Replace Corner Components at blunt or acute angles.
"""
import vanilla, math
from Foundation import NSPoint
class ReplaceCornersAtCertainAngles( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 250
windowHeight = 140
windowWidthResize = 100 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Replace Corners At Certain Angles", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow" # stores last window position and size
)
self.cornerList = self.getAllCorners()
# UI elements:
self.w.text_1 = vanilla.TextBox( (15-1, 12+2, 75, 14), "Replace", sizeStyle='small' )
self.w.searchForCorner = vanilla.PopUpButton( (15+60, 12, -15, 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_2 = vanilla.TextBox( (15-1, 36+2, 75, 14), "with", sizeStyle='small' )
self.w.replaceWithCorner = vanilla.PopUpButton( (15+60, 36, -15, 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)
self.w.text_3a = vanilla.TextBox( (15-1, 60+2, 75, 14), "at angles", sizeStyle='small' )
self.w.largerOrSmaller = vanilla.PopUpButton( (15+60, 60, 70, 17), ("larger","smaller"), sizeStyle='small', callback=self.SavePreferences )
self.w.text_3b = vanilla.TextBox( (150, 60+2, 30, 14), "than", sizeStyle='small' )
self.w.thresholdAngle = vanilla.EditText( (180, 60, -15, 15+3), "90", sizeStyle = 'small')
# Run Button:
self.w.runButton = vanilla.Button((-80-15, -20-15, -15, -15), "Replace", sizeStyle='regular', callback=self.ReplaceCornersAtCertainAnglesMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.CheckButton(None)
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller"] = self.w.largerOrSmaller.get()
Glyphs.defaults["com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle"] = self.w.thresholdAngle.get()
except:
return False
return True
def LoadPreferences( self ):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_(
{
"com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller": "0",
"com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle": "90"
}
)
self.w.largerOrSmaller.set( Glyphs.defaults["com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller"] )
self.w.thresholdAngle.set( Glyphs.defaults["com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle"] )
except:
return False
return True
def CheckButton( self, sender ):
if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():
self.w.runButton.enable(onOff=False)
else:
self.w.runButton.enable(onOff=True)
def getAllCorners(self):
thisFont = Glyphs.font
corners = [g.name for g in thisFont.glyphs if g.name.startswith("_corner.")]
return corners
def angleBetweenVectors(self, P0, P1, P2):
vector1 = NSPoint(P0.x-P1.x, P0.y-P1.y)
vector2 = NSPoint(P2.x-P1.x, P2.y-P1.y)
angle1 = math.degrees(math.atan2(vector1.y,vector1.x))
angle2 = math.degrees(math.atan2(vector2.y,vector2.x))
angleBetweenVectors = ( angle1 - angle2 ) % 360.0
return angleBetweenVectors
def ReplaceCornersAtCertainAnglesMain( self, sender ):
	"""Run the replacement: rename matching corner-component hints.

	Walks every master layer of every selected glyph and, for each corner
	hint whose name matches the searched corner, renames it to the target
	corner when the on-curve angle at its origin node passes the threshold.
	"""
	try:
		# Resolve the corner names from the popup indexes into self.cornerList:
		fromSelection = self.w.searchForCorner.get()
		fromCornerName = self.cornerList[fromSelection]
		toSelection = self.w.replaceWithCorner.get()
		toCornerName = self.cornerList[toSelection]
		# Popup index 1 means "smaller"; threshold is read as degrees:
		smallerThan = bool(self.w.largerOrSmaller.get())
		thresholdAngle = float(self.w.thresholdAngle.get())
		thisFont = Glyphs.font # frontmost font
		masterIDs = [m.id for m in thisFont.masters]
		# Selected layers map back to their glyphs; all masters of each
		# selected glyph are processed, not just the active layer:
		selectedGlyphs = [l.parent for l in thisFont.selectedLayers]
		for thisGlyph in selectedGlyphs:
			for masterID in masterIDs:
				masterLayer = thisGlyph.layers[masterID]
				print("Processing %s, layer '%s'" % ( thisGlyph.name, masterLayer.name ))
				if masterLayer.hints:
					for thisHint in masterLayer.hints:
						# CORNER is a Glyphs hint-type constant; only corner
						# components with the searched name are candidates:
						if thisHint.type == CORNER and thisHint.name == fromCornerName:
							node = thisHint.originNode
							# Angle at the node between its neighbours:
							angle = self.angleBetweenVectors( node.prevNode, node, node.nextNode )
							if (smallerThan and angle < thresholdAngle) or (not smallerThan and angle > thresholdAngle):
								thisHint.name = toCornerName
								print("- replaced hint at %i, %i (angle: %.1f)" % (node.x, node.y, angle))
							else:
								# Not replaced; report the angle for reference:
								print(angle)
		if not self.SavePreferences( self ):
			print("Note: 'Replace Corners At Certain Angles' could not write preferences.")
	except Exception as e:
		# brings macro window to front and reports error:
		Glyphs.showMacroWindow()
		print("Replace Corners At Certain Angles Error: %s" % e)
# Instantiate the dialog; __init__ builds the UI and opens the window.
ReplaceCornersAtCertainAngles()
|
flexible
|
{
"blob_id": "540ae4be6a41d52d9c803f829fc8b13b523b31bc",
"index": 116,
"step-1": "<mask token>\n\n\nclass ReplaceCornersAtCertainAngles(object):\n\n def __init__(self):\n windowWidth = 250\n windowHeight = 140\n windowWidthResize = 100\n windowHeightResize = 0\n self.w = vanilla.FloatingWindow((windowWidth, windowHeight),\n 'Replace Corners At Certain Angles', minSize=(windowWidth,\n windowHeight), maxSize=(windowWidth + windowWidthResize, \n windowHeight + windowHeightResize), autosaveName=\n 'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')\n self.cornerList = self.getAllCorners()\n self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',\n sizeStyle='small')\n self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),\n self.cornerList, sizeStyle='small', callback=self.CheckButton)\n self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',\n sizeStyle='small')\n self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15, \n 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)\n self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),\n 'at angles', sizeStyle='small')\n self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),\n ('larger', 'smaller'), sizeStyle='small', callback=self.\n SavePreferences)\n self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',\n sizeStyle='small')\n self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),\n '90', sizeStyle='small')\n self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),\n 'Replace', sizeStyle='regular', callback=self.\n ReplaceCornersAtCertainAnglesMain)\n self.w.setDefaultButton(self.w.runButton)\n if not self.LoadPreferences():\n print(\n \"Note: 'Replace Corners At Certain Angles' could not load preferences. 
Will resort to defaults\"\n )\n self.CheckButton(None)\n self.w.open()\n self.w.makeKey()\n\n def SavePreferences(self, sender):\n try:\n Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'\n ] = self.w.largerOrSmaller.get()\n Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'\n ] = self.w.thresholdAngle.get()\n except:\n return False\n return True\n\n def LoadPreferences(self):\n try:\n NSUserDefaults.standardUserDefaults().registerDefaults_({\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':\n '0',\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':\n '90'})\n self.w.largerOrSmaller.set(Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])\n self.w.thresholdAngle.set(Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])\n except:\n return False\n return True\n\n def CheckButton(self, sender):\n if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():\n self.w.runButton.enable(onOff=False)\n else:\n self.w.runButton.enable(onOff=True)\n\n def getAllCorners(self):\n thisFont = Glyphs.font\n corners = [g.name for g in thisFont.glyphs if g.name.startswith(\n '_corner.')]\n return corners\n\n def angleBetweenVectors(self, P0, P1, P2):\n vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)\n vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)\n angle1 = math.degrees(math.atan2(vector1.y, vector1.x))\n angle2 = math.degrees(math.atan2(vector2.y, vector2.x))\n angleBetweenVectors = (angle1 - angle2) % 360.0\n return angleBetweenVectors\n\n def ReplaceCornersAtCertainAnglesMain(self, sender):\n try:\n fromSelection = self.w.searchForCorner.get()\n fromCornerName = self.cornerList[fromSelection]\n toSelection = self.w.replaceWithCorner.get()\n toCornerName = self.cornerList[toSelection]\n smallerThan = bool(self.w.largerOrSmaller.get())\n thresholdAngle = float(self.w.thresholdAngle.get())\n thisFont = Glyphs.font\n masterIDs = [m.id for m 
in thisFont.masters]\n selectedGlyphs = [l.parent for l in thisFont.selectedLayers]\n for thisGlyph in selectedGlyphs:\n for masterID in masterIDs:\n masterLayer = thisGlyph.layers[masterID]\n print(\"Processing %s, layer '%s'\" % (thisGlyph.name,\n masterLayer.name))\n if masterLayer.hints:\n for thisHint in masterLayer.hints:\n if (thisHint.type == CORNER and thisHint.name ==\n fromCornerName):\n node = thisHint.originNode\n angle = self.angleBetweenVectors(node.\n prevNode, node, node.nextNode)\n if (smallerThan and angle < thresholdAngle or\n not smallerThan and angle > thresholdAngle\n ):\n thisHint.name = toCornerName\n print(\n '- replaced hint at %i, %i (angle: %.1f)'\n % (node.x, node.y, angle))\n else:\n print(angle)\n if not self.SavePreferences(self):\n print(\n \"Note: 'Replace Corners At Certain Angles' could not write preferences.\"\n )\n except Exception as e:\n Glyphs.showMacroWindow()\n print('Replace Corners At Certain Angles Error: %s' % e)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ReplaceCornersAtCertainAngles(object):\n\n def __init__(self):\n windowWidth = 250\n windowHeight = 140\n windowWidthResize = 100\n windowHeightResize = 0\n self.w = vanilla.FloatingWindow((windowWidth, windowHeight),\n 'Replace Corners At Certain Angles', minSize=(windowWidth,\n windowHeight), maxSize=(windowWidth + windowWidthResize, \n windowHeight + windowHeightResize), autosaveName=\n 'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')\n self.cornerList = self.getAllCorners()\n self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',\n sizeStyle='small')\n self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),\n self.cornerList, sizeStyle='small', callback=self.CheckButton)\n self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',\n sizeStyle='small')\n self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15, \n 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)\n self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),\n 'at angles', sizeStyle='small')\n self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),\n ('larger', 'smaller'), sizeStyle='small', callback=self.\n SavePreferences)\n self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',\n sizeStyle='small')\n self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),\n '90', sizeStyle='small')\n self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),\n 'Replace', sizeStyle='regular', callback=self.\n ReplaceCornersAtCertainAnglesMain)\n self.w.setDefaultButton(self.w.runButton)\n if not self.LoadPreferences():\n print(\n \"Note: 'Replace Corners At Certain Angles' could not load preferences. 
Will resort to defaults\"\n )\n self.CheckButton(None)\n self.w.open()\n self.w.makeKey()\n\n def SavePreferences(self, sender):\n try:\n Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'\n ] = self.w.largerOrSmaller.get()\n Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'\n ] = self.w.thresholdAngle.get()\n except:\n return False\n return True\n\n def LoadPreferences(self):\n try:\n NSUserDefaults.standardUserDefaults().registerDefaults_({\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':\n '0',\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':\n '90'})\n self.w.largerOrSmaller.set(Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])\n self.w.thresholdAngle.set(Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])\n except:\n return False\n return True\n\n def CheckButton(self, sender):\n if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():\n self.w.runButton.enable(onOff=False)\n else:\n self.w.runButton.enable(onOff=True)\n\n def getAllCorners(self):\n thisFont = Glyphs.font\n corners = [g.name for g in thisFont.glyphs if g.name.startswith(\n '_corner.')]\n return corners\n\n def angleBetweenVectors(self, P0, P1, P2):\n vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)\n vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)\n angle1 = math.degrees(math.atan2(vector1.y, vector1.x))\n angle2 = math.degrees(math.atan2(vector2.y, vector2.x))\n angleBetweenVectors = (angle1 - angle2) % 360.0\n return angleBetweenVectors\n\n def ReplaceCornersAtCertainAnglesMain(self, sender):\n try:\n fromSelection = self.w.searchForCorner.get()\n fromCornerName = self.cornerList[fromSelection]\n toSelection = self.w.replaceWithCorner.get()\n toCornerName = self.cornerList[toSelection]\n smallerThan = bool(self.w.largerOrSmaller.get())\n thresholdAngle = float(self.w.thresholdAngle.get())\n thisFont = Glyphs.font\n masterIDs = [m.id for m 
in thisFont.masters]\n selectedGlyphs = [l.parent for l in thisFont.selectedLayers]\n for thisGlyph in selectedGlyphs:\n for masterID in masterIDs:\n masterLayer = thisGlyph.layers[masterID]\n print(\"Processing %s, layer '%s'\" % (thisGlyph.name,\n masterLayer.name))\n if masterLayer.hints:\n for thisHint in masterLayer.hints:\n if (thisHint.type == CORNER and thisHint.name ==\n fromCornerName):\n node = thisHint.originNode\n angle = self.angleBetweenVectors(node.\n prevNode, node, node.nextNode)\n if (smallerThan and angle < thresholdAngle or\n not smallerThan and angle > thresholdAngle\n ):\n thisHint.name = toCornerName\n print(\n '- replaced hint at %i, %i (angle: %.1f)'\n % (node.x, node.y, angle))\n else:\n print(angle)\n if not self.SavePreferences(self):\n print(\n \"Note: 'Replace Corners At Certain Angles' could not write preferences.\"\n )\n except Exception as e:\n Glyphs.showMacroWindow()\n print('Replace Corners At Certain Angles Error: %s' % e)\n\n\nReplaceCornersAtCertainAngles()\n",
"step-3": "<mask token>\n__doc__ = \"\"\"\nReplace Corner Components at blunt or acute angles.\n\"\"\"\n<mask token>\n\n\nclass ReplaceCornersAtCertainAngles(object):\n\n def __init__(self):\n windowWidth = 250\n windowHeight = 140\n windowWidthResize = 100\n windowHeightResize = 0\n self.w = vanilla.FloatingWindow((windowWidth, windowHeight),\n 'Replace Corners At Certain Angles', minSize=(windowWidth,\n windowHeight), maxSize=(windowWidth + windowWidthResize, \n windowHeight + windowHeightResize), autosaveName=\n 'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')\n self.cornerList = self.getAllCorners()\n self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',\n sizeStyle='small')\n self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),\n self.cornerList, sizeStyle='small', callback=self.CheckButton)\n self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',\n sizeStyle='small')\n self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15, \n 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)\n self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),\n 'at angles', sizeStyle='small')\n self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),\n ('larger', 'smaller'), sizeStyle='small', callback=self.\n SavePreferences)\n self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',\n sizeStyle='small')\n self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),\n '90', sizeStyle='small')\n self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),\n 'Replace', sizeStyle='regular', callback=self.\n ReplaceCornersAtCertainAnglesMain)\n self.w.setDefaultButton(self.w.runButton)\n if not self.LoadPreferences():\n print(\n \"Note: 'Replace Corners At Certain Angles' could not load preferences. 
Will resort to defaults\"\n )\n self.CheckButton(None)\n self.w.open()\n self.w.makeKey()\n\n def SavePreferences(self, sender):\n try:\n Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'\n ] = self.w.largerOrSmaller.get()\n Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'\n ] = self.w.thresholdAngle.get()\n except:\n return False\n return True\n\n def LoadPreferences(self):\n try:\n NSUserDefaults.standardUserDefaults().registerDefaults_({\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':\n '0',\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':\n '90'})\n self.w.largerOrSmaller.set(Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])\n self.w.thresholdAngle.set(Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])\n except:\n return False\n return True\n\n def CheckButton(self, sender):\n if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():\n self.w.runButton.enable(onOff=False)\n else:\n self.w.runButton.enable(onOff=True)\n\n def getAllCorners(self):\n thisFont = Glyphs.font\n corners = [g.name for g in thisFont.glyphs if g.name.startswith(\n '_corner.')]\n return corners\n\n def angleBetweenVectors(self, P0, P1, P2):\n vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)\n vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)\n angle1 = math.degrees(math.atan2(vector1.y, vector1.x))\n angle2 = math.degrees(math.atan2(vector2.y, vector2.x))\n angleBetweenVectors = (angle1 - angle2) % 360.0\n return angleBetweenVectors\n\n def ReplaceCornersAtCertainAnglesMain(self, sender):\n try:\n fromSelection = self.w.searchForCorner.get()\n fromCornerName = self.cornerList[fromSelection]\n toSelection = self.w.replaceWithCorner.get()\n toCornerName = self.cornerList[toSelection]\n smallerThan = bool(self.w.largerOrSmaller.get())\n thresholdAngle = float(self.w.thresholdAngle.get())\n thisFont = Glyphs.font\n masterIDs = [m.id for m 
in thisFont.masters]\n selectedGlyphs = [l.parent for l in thisFont.selectedLayers]\n for thisGlyph in selectedGlyphs:\n for masterID in masterIDs:\n masterLayer = thisGlyph.layers[masterID]\n print(\"Processing %s, layer '%s'\" % (thisGlyph.name,\n masterLayer.name))\n if masterLayer.hints:\n for thisHint in masterLayer.hints:\n if (thisHint.type == CORNER and thisHint.name ==\n fromCornerName):\n node = thisHint.originNode\n angle = self.angleBetweenVectors(node.\n prevNode, node, node.nextNode)\n if (smallerThan and angle < thresholdAngle or\n not smallerThan and angle > thresholdAngle\n ):\n thisHint.name = toCornerName\n print(\n '- replaced hint at %i, %i (angle: %.1f)'\n % (node.x, node.y, angle))\n else:\n print(angle)\n if not self.SavePreferences(self):\n print(\n \"Note: 'Replace Corners At Certain Angles' could not write preferences.\"\n )\n except Exception as e:\n Glyphs.showMacroWindow()\n print('Replace Corners At Certain Angles Error: %s' % e)\n\n\nReplaceCornersAtCertainAngles()\n",
"step-4": "from __future__ import division, print_function, unicode_literals\n__doc__ = \"\"\"\nReplace Corner Components at blunt or acute angles.\n\"\"\"\nimport vanilla, math\nfrom Foundation import NSPoint\n\n\nclass ReplaceCornersAtCertainAngles(object):\n\n def __init__(self):\n windowWidth = 250\n windowHeight = 140\n windowWidthResize = 100\n windowHeightResize = 0\n self.w = vanilla.FloatingWindow((windowWidth, windowHeight),\n 'Replace Corners At Certain Angles', minSize=(windowWidth,\n windowHeight), maxSize=(windowWidth + windowWidthResize, \n windowHeight + windowHeightResize), autosaveName=\n 'com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow')\n self.cornerList = self.getAllCorners()\n self.w.text_1 = vanilla.TextBox((15 - 1, 12 + 2, 75, 14), 'Replace',\n sizeStyle='small')\n self.w.searchForCorner = vanilla.PopUpButton((15 + 60, 12, -15, 17),\n self.cornerList, sizeStyle='small', callback=self.CheckButton)\n self.w.text_2 = vanilla.TextBox((15 - 1, 36 + 2, 75, 14), 'with',\n sizeStyle='small')\n self.w.replaceWithCorner = vanilla.PopUpButton((15 + 60, 36, -15, \n 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)\n self.w.text_3a = vanilla.TextBox((15 - 1, 60 + 2, 75, 14),\n 'at angles', sizeStyle='small')\n self.w.largerOrSmaller = vanilla.PopUpButton((15 + 60, 60, 70, 17),\n ('larger', 'smaller'), sizeStyle='small', callback=self.\n SavePreferences)\n self.w.text_3b = vanilla.TextBox((150, 60 + 2, 30, 14), 'than',\n sizeStyle='small')\n self.w.thresholdAngle = vanilla.EditText((180, 60, -15, 15 + 3),\n '90', sizeStyle='small')\n self.w.runButton = vanilla.Button((-80 - 15, -20 - 15, -15, -15),\n 'Replace', sizeStyle='regular', callback=self.\n ReplaceCornersAtCertainAnglesMain)\n self.w.setDefaultButton(self.w.runButton)\n if not self.LoadPreferences():\n print(\n \"Note: 'Replace Corners At Certain Angles' could not load preferences. 
Will resort to defaults\"\n )\n self.CheckButton(None)\n self.w.open()\n self.w.makeKey()\n\n def SavePreferences(self, sender):\n try:\n Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'\n ] = self.w.largerOrSmaller.get()\n Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'\n ] = self.w.thresholdAngle.get()\n except:\n return False\n return True\n\n def LoadPreferences(self):\n try:\n NSUserDefaults.standardUserDefaults().registerDefaults_({\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller':\n '0',\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle':\n '90'})\n self.w.largerOrSmaller.set(Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller'])\n self.w.thresholdAngle.set(Glyphs.defaults[\n 'com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle'])\n except:\n return False\n return True\n\n def CheckButton(self, sender):\n if self.w.searchForCorner.get() == self.w.replaceWithCorner.get():\n self.w.runButton.enable(onOff=False)\n else:\n self.w.runButton.enable(onOff=True)\n\n def getAllCorners(self):\n thisFont = Glyphs.font\n corners = [g.name for g in thisFont.glyphs if g.name.startswith(\n '_corner.')]\n return corners\n\n def angleBetweenVectors(self, P0, P1, P2):\n vector1 = NSPoint(P0.x - P1.x, P0.y - P1.y)\n vector2 = NSPoint(P2.x - P1.x, P2.y - P1.y)\n angle1 = math.degrees(math.atan2(vector1.y, vector1.x))\n angle2 = math.degrees(math.atan2(vector2.y, vector2.x))\n angleBetweenVectors = (angle1 - angle2) % 360.0\n return angleBetweenVectors\n\n def ReplaceCornersAtCertainAnglesMain(self, sender):\n try:\n fromSelection = self.w.searchForCorner.get()\n fromCornerName = self.cornerList[fromSelection]\n toSelection = self.w.replaceWithCorner.get()\n toCornerName = self.cornerList[toSelection]\n smallerThan = bool(self.w.largerOrSmaller.get())\n thresholdAngle = float(self.w.thresholdAngle.get())\n thisFont = Glyphs.font\n masterIDs = [m.id for m 
in thisFont.masters]\n selectedGlyphs = [l.parent for l in thisFont.selectedLayers]\n for thisGlyph in selectedGlyphs:\n for masterID in masterIDs:\n masterLayer = thisGlyph.layers[masterID]\n print(\"Processing %s, layer '%s'\" % (thisGlyph.name,\n masterLayer.name))\n if masterLayer.hints:\n for thisHint in masterLayer.hints:\n if (thisHint.type == CORNER and thisHint.name ==\n fromCornerName):\n node = thisHint.originNode\n angle = self.angleBetweenVectors(node.\n prevNode, node, node.nextNode)\n if (smallerThan and angle < thresholdAngle or\n not smallerThan and angle > thresholdAngle\n ):\n thisHint.name = toCornerName\n print(\n '- replaced hint at %i, %i (angle: %.1f)'\n % (node.x, node.y, angle))\n else:\n print(angle)\n if not self.SavePreferences(self):\n print(\n \"Note: 'Replace Corners At Certain Angles' could not write preferences.\"\n )\n except Exception as e:\n Glyphs.showMacroWindow()\n print('Replace Corners At Certain Angles Error: %s' % e)\n\n\nReplaceCornersAtCertainAngles()\n",
"step-5": "#MenuTitle: Find and Replace Corner Components at Certain Angles\n# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, unicode_literals\n__doc__=\"\"\"\nReplace Corner Components at blunt or acute angles.\n\"\"\"\n\nimport vanilla, math\nfrom Foundation import NSPoint\n\nclass ReplaceCornersAtCertainAngles( object ):\n\tdef __init__( self ):\n\t\t# Window 'self.w':\n\t\twindowWidth = 250\n\t\twindowHeight = 140\n\t\twindowWidthResize = 100 # user can resize width by this value\n\t\twindowHeightResize = 0 # user can resize height by this value\n\t\t\t\t\n\t\tself.w = vanilla.FloatingWindow(\n\t\t\t( windowWidth, windowHeight ), # default window size\n\t\t\t\"Replace Corners At Certain Angles\", # window title\n\t\t\tminSize = ( windowWidth, windowHeight ), # minimum size (for resizing)\n\t\t\tmaxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)\n\t\t\tautosaveName = \"com.mekkablue.ReplaceCornersAtCertainAngles.mainwindow\" # stores last window position and size\n\t\t)\n\t\t\n\t\tself.cornerList = self.getAllCorners()\n\t\t\n\t\t# UI elements:\n\t\tself.w.text_1 = vanilla.TextBox( (15-1, 12+2, 75, 14), \"Replace\", sizeStyle='small' )\n\t\tself.w.searchForCorner = vanilla.PopUpButton( (15+60, 12, -15, 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)\n\t\tself.w.text_2 = vanilla.TextBox( (15-1, 36+2, 75, 14), \"with\", sizeStyle='small' )\n\t\tself.w.replaceWithCorner = vanilla.PopUpButton( (15+60, 36, -15, 17), self.cornerList, sizeStyle='small', callback=self.CheckButton)\n\t\tself.w.text_3a = vanilla.TextBox( (15-1, 60+2, 75, 14), \"at angles\", sizeStyle='small' )\n\t\tself.w.largerOrSmaller = vanilla.PopUpButton( (15+60, 60, 70, 17), (\"larger\",\"smaller\"), sizeStyle='small', callback=self.SavePreferences )\n\t\tself.w.text_3b = vanilla.TextBox( (150, 60+2, 30, 14), \"than\", sizeStyle='small' )\n\t\tself.w.thresholdAngle = vanilla.EditText( (180, 60, 
-15, 15+3), \"90\", sizeStyle = 'small')\n\t\t\t\t\n\t\t# Run Button:\n\t\tself.w.runButton = vanilla.Button((-80-15, -20-15, -15, -15), \"Replace\", sizeStyle='regular', callback=self.ReplaceCornersAtCertainAnglesMain )\n\t\tself.w.setDefaultButton( self.w.runButton )\n\t\t\n\t\t# Load Settings:\n\t\tif not self.LoadPreferences():\n\t\t\tprint(\"Note: 'Replace Corners At Certain Angles' could not load preferences. Will resort to defaults\")\n\t\t\n\t\t# Open window and focus on it:\n\t\tself.CheckButton(None)\n\t\tself.w.open()\n\t\tself.w.makeKey()\n\t\t\n\tdef SavePreferences( self, sender ):\n\t\ttry:\n\t\t\tGlyphs.defaults[\"com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller\"] = self.w.largerOrSmaller.get()\n\t\t\tGlyphs.defaults[\"com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle\"] = self.w.thresholdAngle.get()\n\t\texcept:\n\t\t\treturn False\n\t\t\t\n\t\treturn True\n\n\tdef LoadPreferences( self ):\n\t\ttry:\n\t\t\tNSUserDefaults.standardUserDefaults().registerDefaults_(\n\t\t\t\t{\n\t\t\t\t\t\"com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller\": \"0\",\n\t\t\t\t\t\"com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle\": \"90\"\n\t\t\t\t}\n\t\t\t)\n\t\t\tself.w.largerOrSmaller.set( Glyphs.defaults[\"com.mekkablue.ReplaceCornersAtCertainAngles.largerOrSmaller\"] )\n\t\t\tself.w.thresholdAngle.set( Glyphs.defaults[\"com.mekkablue.ReplaceCornersAtCertainAngles.thresholdAngle\"] )\n\t\texcept:\n\t\t\treturn False\n\t\t\t\n\t\treturn True\n\t\t\n\tdef CheckButton( self, sender ):\n\t\tif self.w.searchForCorner.get() == self.w.replaceWithCorner.get():\n\t\t\tself.w.runButton.enable(onOff=False)\n\t\telse:\n\t\t\tself.w.runButton.enable(onOff=True)\n\t\n\tdef getAllCorners(self):\n\t\tthisFont = Glyphs.font\n\t\tcorners = [g.name for g in thisFont.glyphs if g.name.startswith(\"_corner.\")]\n\t\treturn corners\n\t\t\n\tdef angleBetweenVectors(self, P0, P1, P2):\n\t\tvector1 = NSPoint(P0.x-P1.x, P0.y-P1.y)\n\t\tvector2 = 
NSPoint(P2.x-P1.x, P2.y-P1.y)\n\t\tangle1 = math.degrees(math.atan2(vector1.y,vector1.x))\n\t\tangle2 = math.degrees(math.atan2(vector2.y,vector2.x))\n\t\tangleBetweenVectors = ( angle1 - angle2 ) % 360.0\n\t\treturn angleBetweenVectors\n\t\n\tdef ReplaceCornersAtCertainAnglesMain( self, sender ):\n\t\ttry:\n\t\t\tfromSelection = self.w.searchForCorner.get()\n\t\t\tfromCornerName = self.cornerList[fromSelection]\n\t\t\ttoSelection = self.w.replaceWithCorner.get()\n\t\t\ttoCornerName = self.cornerList[toSelection]\n\t\t\t\n\t\t\tsmallerThan = bool(self.w.largerOrSmaller.get())\n\t\t\tthresholdAngle = float(self.w.thresholdAngle.get())\n\t\t\t\n\t\t\tthisFont = Glyphs.font # frontmost font\n\t\t\tmasterIDs = [m.id for m in thisFont.masters]\n\t\t\tselectedGlyphs = [l.parent for l in thisFont.selectedLayers]\n\t\t\t\n\t\t\tfor thisGlyph in selectedGlyphs:\n\t\t\t\tfor masterID in masterIDs:\n\t\t\t\t\tmasterLayer = thisGlyph.layers[masterID]\n\t\t\t\t\tprint(\"Processing %s, layer '%s'\" % ( thisGlyph.name, masterLayer.name ))\n\t\t\t\t\tif masterLayer.hints:\n\t\t\t\t\t\tfor thisHint in masterLayer.hints:\n\t\t\t\t\t\t\tif thisHint.type == CORNER and thisHint.name == fromCornerName:\n\t\t\t\t\t\t\t\tnode = thisHint.originNode\n\t\t\t\t\t\t\t\tangle = self.angleBetweenVectors( node.prevNode, node, node.nextNode )\n\t\t\t\t\t\t\t\tif (smallerThan and angle < thresholdAngle) or (not smallerThan and angle > thresholdAngle):\n\t\t\t\t\t\t\t\t\tthisHint.name = toCornerName\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tprint(\"- replaced hint at %i, %i (angle: %.1f)\" % (node.x, node.y, angle))\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tprint(angle)\n\t\t\t\n\t\t\t\n\t\t\tif not self.SavePreferences( self ):\n\t\t\t\tprint(\"Note: 'Replace Corners At Certain Angles' could not write preferences.\")\n\t\t\t\n\t\t\t\n\t\texcept Exception as e:\n\t\t\t# brings macro window to front and reports error:\n\t\t\tGlyphs.showMacroWindow()\n\t\t\tprint(\"Replace Corners At Certain Angles 
Error: %s\" % e)\n\nReplaceCornersAtCertainAngles()",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
if c0 % 2 == 0:
c0 //= 2
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
break
elif c0 % 2 == 1:
c0 = c0 * 3 + 1
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step:', step)
break
print('Total Steps: ', step)
<|reserved_special_token_1|>
c0 = int(input('Enter a non- negative, non-zero integer: '))
step = 0
while True:
if c0 % 2 == 0:
c0 //= 2
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
break
elif c0 % 2 == 1:
c0 = c0 * 3 + 1
if c0 != 1:
step += 1
print(' New value is ', c0, ':', 'step', step)
continue
elif c0 == 1:
step += 1
print(' New value is ', c0, ':', 'step:', step)
break
print('Total Steps: ', step)
<|reserved_special_token_1|>
# take any non-negative and non-zero integer number and name it c0; if it's even, evaluate a new c0 as c0 ÷ 2;
# otherwise, if it's odd, evaluate a new c0 as 3 × c0 + 1;
# if c0 ≠ 1, skip to point 2.
# The hypothesis says that regardless of the initial value of c0, it will always go to 1.


def collatz_steps(c0):
    """Run the Collatz iteration from c0, printing each intermediate value.

    Repeats: even -> c0 // 2, odd -> 3 * c0 + 1, stopping once c0 reaches 1.
    As in the original, an input of 1 still performs three steps (4, 2, 1)
    because the update is applied before the termination test.
    Returns the number of steps taken.
    """
    step = 0
    while True:
        # One Collatz step. The four near-identical branches of the original
        # collapse into a single update + report; this also fixes the
        # inconsistent 'step:'/'step' label in one of the old branches.
        c0 = c0 // 2 if c0 % 2 == 0 else 3 * c0 + 1
        step += 1
        print(' New value is ', c0, ':', 'step', step)
        if c0 == 1:
            return step


if __name__ == '__main__':
    c0 = int(input('Enter a non- negative, non-zero integer: '))
    print('Total Steps: ', collatz_steps(c0))
|
flexible
|
{
"blob_id": "e7db3390d30f86e19eee930c48e5f848f41cc579",
"index": 645,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n if c0 % 2 == 0:\n c0 //= 2\n if c0 != 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n continue\n elif c0 == 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n break\n elif c0 % 2 == 1:\n c0 = c0 * 3 + 1\n if c0 != 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n continue\n elif c0 == 1:\n step += 1\n print(' New value is ', c0, ':', 'step:', step)\n break\nprint('Total Steps: ', step)\n",
"step-3": "c0 = int(input('Enter a non- negative, non-zero integer: '))\nstep = 0\nwhile True:\n if c0 % 2 == 0:\n c0 //= 2\n if c0 != 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n continue\n elif c0 == 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n break\n elif c0 % 2 == 1:\n c0 = c0 * 3 + 1\n if c0 != 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n continue\n elif c0 == 1:\n step += 1\n print(' New value is ', c0, ':', 'step:', step)\n break\nprint('Total Steps: ', step)\n",
"step-4": "# take any non-negative and non-zero integer number and name it c0;if it's even, evaluate a new c0 as c0 ÷ 2;\n# otherwise, if it's odd, evaluate a new c0 as 3 × c0 + 1;\n# if c0 ≠ 1, skip to point 2.\n# The hypothesis says that regardless of the initial value of c0,it will always go to 1.\n# Write a program which reads one natural number and executes the above steps as long as c0 remains different from 1. \n# We also want you to count the steps needed to achieve the goal. Your code should output all the intermediate values of c0, too.\n\nc0 = int(input('Enter a non- negative, non-zero integer: '))\nstep = 0\n\nwhile True:\n if c0 % 2 == 0:\n c0 //= 2\n if c0 != 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n continue\n elif c0 == 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n break\n elif c0 % 2 == 1:\n c0 = c0 * 3 + 1\n if c0 != 1:\n step += 1\n print(' New value is ', c0, ':', 'step', step)\n continue\n elif c0 == 1:\n step += 1\n print(' New value is ', c0, ':', 'step:', step)\n break\n \nprint('Total Steps: ', step)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import datetime
import discord


def getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge, outlaws, spark,
             spitfire, excelsior, eternal, fusion, dynasty, shock, dragons, defiant, valiant, titans,
             justice):
    """Build a Discord embed listing all 20 Overwatch League teams (2021 season).

    Each parameter is a per-team prefix string (presumably an emoji or status
    marker -- confirm against the caller) inserted before that team's line.

    Returns:
        discord.Embed: gold-colored embed titled "Overwatch League Teams",
        timestamped with the current UTC time.
    """
    # (prefix, label) pairs in alphabetical order by team abbreviation.
    teams = [
        (reign, "ATL-Atlanta Reign"),
        (uprising, "BOS-Boston Uprising"),
        (hunters, "CDH-Chengdu Hunters"),
        (fuel, "DAL-Dallas Fuel"),
        (mayhem, "FLA-Florida Mayhem"),
        (gladiators, "GLA-Los Angeles Gladiators"),
        (charge, "GZC-Guangzhou Charge"),
        (outlaws, "HOU-Houston Outlaws"),
        (spark, "HZS-Hangzhou Spark"),
        (spitfire, "LDN-London Spitfire"),
        (excelsior, "NYE-New York Excelsior"),
        (eternal, "PAR-Paris Eternal"),
        (fusion, "PHI-Philadelphia Fusion"),  # fixed typo: was "Fustion"
        (dynasty, "SEO-Seoul Dynasty"),
        (shock, "SFS-San Francisco Shock"),
        (dragons, "SHD-Shanghai Dragons"),
        (defiant, "TOR-Toronto Defiant"),
        (valiant, "VAL-Los Angeles Valiant"),
        (titans, "VAN-Vancouver Titans"),
        (justice, "WAS-Washington Justice"),
    ]
    # Building from a list avoids the fragile 20-line "+" chain of the original,
    # which relied on accidental implicit string-literal concatenation at one spot.
    description = "2021 Season\n" + "\n".join(
        f"**{prefix}{label}**" for prefix, label in teams
    )
    teamList = discord.Embed(
        title="Overwatch League Teams",
        description=description,
        color=discord.Colour.gold(),
        timestamp=datetime.datetime.utcnow(),
    )
    return teamList
|
normal
|
{
"blob_id": "9a02e09cbfe2c9b6ebb9d20ba6cea639871f0838",
"index": 7647,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge,\n outlaws, spark, spitfire, excelsior, eternal, fusion, dynasty, shock,\n dragons, defiant, valiant, titans, justice):\n teamList = discord.Embed(title='Overwatch League Teams', description=\n '2021 Season\\n' + '**' + reign + \"\"\"ATL-Atlanta Reign**\n\"\"\" + '**' +\n uprising + 'BOS-Boston Uprising**\\n' + '**' + hunters +\n 'CDH-Chengdu Hunters**\\n' + '**' + fuel + \"\"\"DAL-Dallas Fuel**\n\"\"\" +\n '**' + mayhem + 'FLA-Florida Mayhem**\\n' + '**' + gladiators +\n \"\"\"GLA-Los Angeles Gladiators**\n\"\"\" + '**' + charge +\n 'GZC-Guangzhou Charge**\\n' + '**' + outlaws +\n 'HOU-Houston Outlaws**\\n' + '**' + spark +\n \"\"\"HZS-Hangzhou Spark**\n\"\"\" + '**' + spitfire +\n 'LDN-London Spitfire**\\n' + '**' + excelsior +\n 'NYE-New York Excelsior**\\n' + '**' + eternal +\n \"\"\"PAR-Paris Eternal**\n\"\"\" + '**' + fusion +\n 'PHI-Philadelphia Fustion**\\n' + '**' + dynasty +\n 'SEO-Seoul Dynasty**\\n' + '**' + shock +\n \"\"\"SFS-San Francisco Shock**\n\"\"\" + '**' + dragons +\n 'SHD-Shanghai Dragons**\\n' + '**' + defiant +\n \"\"\"TOR-Toronto Defiant**\n**\"\"\" + valiant +\n 'VAL-Los Angeles Valiant**\\n' + '**' + titans +\n 'VAN-Vancouver Titans**\\n' + '**' + justice +\n 'WAS-Washington Justice**', color=discord.Colour.gold(), timestamp=\n datetime.datetime.utcnow())\n return teamList\n",
"step-3": "import datetime\nimport discord\n\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge,\n outlaws, spark, spitfire, excelsior, eternal, fusion, dynasty, shock,\n dragons, defiant, valiant, titans, justice):\n teamList = discord.Embed(title='Overwatch League Teams', description=\n '2021 Season\\n' + '**' + reign + \"\"\"ATL-Atlanta Reign**\n\"\"\" + '**' +\n uprising + 'BOS-Boston Uprising**\\n' + '**' + hunters +\n 'CDH-Chengdu Hunters**\\n' + '**' + fuel + \"\"\"DAL-Dallas Fuel**\n\"\"\" +\n '**' + mayhem + 'FLA-Florida Mayhem**\\n' + '**' + gladiators +\n \"\"\"GLA-Los Angeles Gladiators**\n\"\"\" + '**' + charge +\n 'GZC-Guangzhou Charge**\\n' + '**' + outlaws +\n 'HOU-Houston Outlaws**\\n' + '**' + spark +\n \"\"\"HZS-Hangzhou Spark**\n\"\"\" + '**' + spitfire +\n 'LDN-London Spitfire**\\n' + '**' + excelsior +\n 'NYE-New York Excelsior**\\n' + '**' + eternal +\n \"\"\"PAR-Paris Eternal**\n\"\"\" + '**' + fusion +\n 'PHI-Philadelphia Fustion**\\n' + '**' + dynasty +\n 'SEO-Seoul Dynasty**\\n' + '**' + shock +\n \"\"\"SFS-San Francisco Shock**\n\"\"\" + '**' + dragons +\n 'SHD-Shanghai Dragons**\\n' + '**' + defiant +\n \"\"\"TOR-Toronto Defiant**\n**\"\"\" + valiant +\n 'VAL-Los Angeles Valiant**\\n' + '**' + titans +\n 'VAN-Vancouver Titans**\\n' + '**' + justice +\n 'WAS-Washington Justice**', color=discord.Colour.gold(), timestamp=\n datetime.datetime.utcnow())\n return teamList\n",
"step-4": "import datetime\nimport discord\n\ndef getTeams(reign, uprising, hunters, fuel, mayhem, gladiators, charge, outlaws, spark,\nspitfire, excelsior, eternal, fusion, dynasty, shock, dragons, defiant, valiant, titans,\njustice) :\n teamList = discord.Embed(\n title=\"Overwatch League Teams\",\n description=\"2021 Season\\n\"+\n \"**\"+reign+\"ATL-Atlanta Reign**\\n\"+\n \"**\"+uprising+\"BOS-Boston Uprising**\\n\"+\n \"**\"+hunters+\"CDH-Chengdu Hunters**\\n\"+\n \"**\"+fuel+\"DAL-Dallas Fuel**\\n\"+\n \"**\"+mayhem+\"FLA-Florida Mayhem**\\n\"+\n \"**\"+gladiators+\"GLA-Los Angeles Gladiators**\\n\"+\n \"**\"+charge+\"GZC-Guangzhou Charge**\\n\"+\n \"**\"+outlaws+\"HOU-Houston Outlaws**\\n\"+\n \"**\"+spark+\"HZS-Hangzhou Spark**\\n\"+\n \"**\"+spitfire+\"LDN-London Spitfire**\\n\"+\n \"**\"+excelsior+\"NYE-New York Excelsior**\\n\"+\n \"**\"+eternal+\"PAR-Paris Eternal**\\n\"+\n \"**\"+fusion+\"PHI-Philadelphia Fustion**\\n\"+\n \"**\"+dynasty+\"SEO-Seoul Dynasty**\\n\"+\n \"**\"+shock+\"SFS-San Francisco Shock**\\n\"+\n \"**\"+dragons+\"SHD-Shanghai Dragons**\\n\"+\n \"**\"+defiant+\"TOR-Toronto Defiant**\\n\"\n \"**\"+valiant+\"VAL-Los Angeles Valiant**\\n\"+\n \"**\"+titans+\"VAN-Vancouver Titans**\\n\"+\n \"**\"+justice+\"WAS-Washington Justice**\",\n color=discord.Colour.gold(),\n timestamp=datetime.datetime.utcnow()\n )\n return teamList\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
featureProvider.add(shapeFeatureProvider)
featureProvider.add(statisticsFeatureProvider)
<|reserved_special_token_0|>
featureExtractor.extractFeatures(nodeFeatures, edgeFeatures, featureProvider)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
crag = ...
cragVolumes = ...
raw = ...
membrane = ...
nodeFeatures = ...
edgeFeatures = ...
statisticsFeatureProvider = pycmc.StatisticsFeatureProvider(cragVolumes,
raw, 'raw')
shapeFeatureProvider = pycmc.ShapeFeatureProvider(cragVolumes)
featureProvider = pycmc.CompositeFeatureProvider()
featureProvider.add(shapeFeatureProvider)
featureProvider.add(statisticsFeatureProvider)
featureExtractor = pycmc.FeatureExtractor(crag)
featureExtractor.extractFeatures(nodeFeatures, edgeFeatures, featureProvider)
<|reserved_special_token_1|>
import pycmc
crag = ...
cragVolumes = ...
raw = ...
membrane = ...
nodeFeatures = ...
edgeFeatures = ...
statisticsFeatureProvider = pycmc.StatisticsFeatureProvider(cragVolumes,
raw, 'raw')
shapeFeatureProvider = pycmc.ShapeFeatureProvider(cragVolumes)
featureProvider = pycmc.CompositeFeatureProvider()
featureProvider.add(shapeFeatureProvider)
featureProvider.add(statisticsFeatureProvider)
featureExtractor = pycmc.FeatureExtractor(crag)
featureExtractor.extractFeatures(nodeFeatures, edgeFeatures, featureProvider)
<|reserved_special_token_1|>
import pycmc

# Usage sketch for pycmc feature extraction. The Ellipsis placeholders stand
# in for real objects the caller must supply (presumably loaded from a project
# file -- this script is not runnable as written; confirm against pycmc docs).

# open project, get Crag, CragVolumes, and intensity images
crag = ...          # candidate region adjacency graph
cragVolumes = ...   # per-candidate volumes for the CRAG nodes
raw = ...           # raw intensity image
membrane = ...      # membrane probability image (declared but unused below)
nodeFeatures = ...  # output container, filled by extractFeatures
edgeFeatures = ...  # output container, filled by extractFeatures

# Statistics over the raw intensities within each candidate volume,
# tagged with the feature-name prefix "raw".
statisticsFeatureProvider = pycmc.StatisticsFeatureProvider(cragVolumes, raw, "raw")
shapeFeatureProvider = pycmc.ShapeFeatureProvider(cragVolumes)

# Combine both providers so a single extraction pass computes all features.
featureProvider = pycmc.CompositeFeatureProvider()
featureProvider.add(shapeFeatureProvider)
featureProvider.add(statisticsFeatureProvider)

# Populate nodeFeatures / edgeFeatures in place for every CRAG node and edge.
featureExtractor = pycmc.FeatureExtractor(crag)
featureExtractor.extractFeatures(nodeFeatures, edgeFeatures, featureProvider)
|
flexible
|
{
"blob_id": "37d817436ce977339594867ef917177e7371a212",
"index": 6847,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfeatureProvider.add(shapeFeatureProvider)\nfeatureProvider.add(statisticsFeatureProvider)\n<mask token>\nfeatureExtractor.extractFeatures(nodeFeatures, edgeFeatures, featureProvider)\n",
"step-3": "<mask token>\ncrag = ...\ncragVolumes = ...\nraw = ...\nmembrane = ...\nnodeFeatures = ...\nedgeFeatures = ...\nstatisticsFeatureProvider = pycmc.StatisticsFeatureProvider(cragVolumes,\n raw, 'raw')\nshapeFeatureProvider = pycmc.ShapeFeatureProvider(cragVolumes)\nfeatureProvider = pycmc.CompositeFeatureProvider()\nfeatureProvider.add(shapeFeatureProvider)\nfeatureProvider.add(statisticsFeatureProvider)\nfeatureExtractor = pycmc.FeatureExtractor(crag)\nfeatureExtractor.extractFeatures(nodeFeatures, edgeFeatures, featureProvider)\n",
"step-4": "import pycmc\ncrag = ...\ncragVolumes = ...\nraw = ...\nmembrane = ...\nnodeFeatures = ...\nedgeFeatures = ...\nstatisticsFeatureProvider = pycmc.StatisticsFeatureProvider(cragVolumes,\n raw, 'raw')\nshapeFeatureProvider = pycmc.ShapeFeatureProvider(cragVolumes)\nfeatureProvider = pycmc.CompositeFeatureProvider()\nfeatureProvider.add(shapeFeatureProvider)\nfeatureProvider.add(statisticsFeatureProvider)\nfeatureExtractor = pycmc.FeatureExtractor(crag)\nfeatureExtractor.extractFeatures(nodeFeatures, edgeFeatures, featureProvider)\n",
"step-5": "import pycmc\n\n# open project, get Crag, CragVolumes, and intensity images\ncrag = ...\ncragVolumes = ...\nraw = ...\nmembrane = ...\nnodeFeatures = ...\nedgeFeatures = ...\n\nstatisticsFeatureProvider = pycmc.StatisticsFeatureProvider(cragVolumes, raw, \"raw\")\nshapeFeatureProvider = pycmc.ShapeFeatureProvider(cragVolumes)\n\nfeatureProvider = pycmc.CompositeFeatureProvider()\nfeatureProvider.add(shapeFeatureProvider)\nfeatureProvider.add(statisticsFeatureProvider)\n\nfeatureExtractor = pycmc.FeatureExtractor(crag)\nfeatureExtractor.extractFeatures(nodeFeatures, edgeFeatures, featureProvider)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import os
import unittest
from wireless.trex_wireless_manager import APMode
from wireless.trex_wireless_manager_private import *
class APInfoTest(unittest.TestCase):
    """Tests methods for the APInfo class."""

    def test_init_correct(self):
        """Test the __init__ method when parameters are correct."""
        # mocks of files (APInfo only stores them, so plain ints suffice)
        rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)

        ap = APInfo(port_id=1, ip="2.2.2.2", mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
                    gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)

        self.assertEqual(ap.ip, '2.2.2.2')

    def test_init_no_mac(self):
        """Test the __init__ method when parameter 'mac' is None.
        Should raise a ValueError.
        """
        # mocks of files
        rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)

        with self.assertRaises(ValueError):
            ap = APInfo(port_id=1, ip="2.2.2.2", mac=None, radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
                        gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)

    def test_init_no_ip(self):
        """Test the __init__ method when parameter 'ip' is None.
        Since the field is optional, it should pass.
        """
        # mocks of files
        rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)

        ap = APInfo(port_id=1, ip=None, mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
                    gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)
        self.assertEqual(ap.ip, None)

    def test_str(self):
        """Test the __str__ method.

        The name is expected in Cisco-style dotted MAC notation
        derived from the AP's MAC address.
        """
        # mocks of files
        rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)

        ap = APInfo(port_id=1, ip="2.2.2.2", mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
                    gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)

        self.assertEqual(str(ap), 'APbbbb.bbbb.bbbb')
        # str(ap) and ap.name must agree
        self.assertEqual(str(ap), ap.name)
class ClientInfoTest(unittest.TestCase):
    """Tests methods for the ClientInfo class."""

    def setUp(self):
        # mocks of files (only stored by APInfo, so plain ints suffice)
        rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)
        # A valid parent AP shared by all tests below.
        self.ap = APInfo(port_id=1, ip="2.2.2.2", mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
                         gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)

    def test_init_correct(self):
        """Test the __init__ method when parameters are correct."""
        client = ClientInfo("cc:cc:cc:cc:cc:cc", ip="3.3.3.3", ap_info=self.ap)
        self.assertEqual(client.ip, "3.3.3.3")
        # ip_bytes is the packed big-endian form of the dotted-quad IP
        self.assertEqual(client.ip_bytes, b'\x03\x03\x03\x03')

    def test_init_no_mac(self):
        """Test the __init__ method when mandatory parameter 'mac' is None."""
        with self.assertRaises(ValueError):
            client = ClientInfo(None, ip="3.3.3.3", ap_info=self.ap)

    def test_init_no_ip(self):
        """Test the __init__ method when parameter 'ip' is None.
        Since the field is optional, it should pass.
        """
        client = ClientInfo("cc:cc:cc:cc:cc:cc", ip=None, ap_info=self.ap)
        self.assertEqual(client.ip, None)
        self.assertEqual(client.ip_bytes, None)

    def test_init_wrong_ap_type(self):
        """Test the __init__ method when mandatory parameter 'ap_info' is of wrong type."""
        ap_wrong = object()  # not an APInfo instance
        with self.assertRaises(ValueError):
            client = ClientInfo("cc:cc:cc:cc:cc:cc",
                                ip="3.3.3.3", ap_info=ap_wrong)

    def test_str(self):
        """Test the __str__ method."""
        client = ClientInfo("cc:cc:cc:cc:cc:cc", ip="3.3.3.3", ap_info=self.ap)
        self.assertEqual(str(client), "Client cc:cc:cc:cc:cc:cc - 3.3.3.3")
        # str(client) and client.name must agree
        self.assertEqual(str(client), client.name)
|
normal
|
{
"blob_id": "ae5dfa7fa6a0d7349d6ae29aeac819903facb48f",
"index": 3518,
"step-1": "<mask token>\n\n\nclass APInfoTest(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip=None, mac='bb:bb:bb:bb:bb:bb', radio_mac=\n 'bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n self.assertEqual(ap.ip, None)\n <mask token>\n\n\nclass ClientInfoTest(unittest.TestCase):\n \"\"\"Tests methods for the ClientInfo class.\"\"\"\n\n def setUp(self):\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n self.ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n\n def test_init_correct(self):\n \"\"\"Test the __init__ method when parameters are correct.\"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=self.ap)\n self.assertEqual(client.ip, '3.3.3.3')\n self.assertEqual(client.ip_bytes, b'\\x03\\x03\\x03\\x03')\n\n def test_init_no_mac(self):\n \"\"\"Test the __init__ method when mandatory parameter 'mac' is None.\"\"\"\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip='3.3.3.3', ap_info=self.ap)\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)\n\n def test_init_wrong_ap_type(self):\n \"\"\"Test the __init__ method when mandatory parameter 'ap_info' is of 
wrnong type.\"\"\"\n ap_wrong = object()\n with self.assertRaises(ValueError):\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=\n ap_wrong)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=self.ap)\n self.assertEqual(str(client), 'Client cc:cc:cc:cc:cc:cc - 3.3.3.3')\n self.assertEqual(str(client), client.name)\n",
"step-2": "<mask token>\n\n\nclass APInfoTest(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip=None, mac='bb:bb:bb:bb:bb:bb', radio_mac=\n 'bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n self.assertEqual(ap.ip, None)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n self.assertEqual(str(ap), 'APbbbb.bbbb.bbbb')\n self.assertEqual(str(ap), ap.name)\n\n\nclass ClientInfoTest(unittest.TestCase):\n \"\"\"Tests methods for the ClientInfo class.\"\"\"\n\n def setUp(self):\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n self.ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n\n def test_init_correct(self):\n \"\"\"Test the __init__ method when parameters are correct.\"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=self.ap)\n self.assertEqual(client.ip, '3.3.3.3')\n self.assertEqual(client.ip_bytes, b'\\x03\\x03\\x03\\x03')\n\n def test_init_no_mac(self):\n \"\"\"Test the __init__ method when mandatory parameter 'mac' is None.\"\"\"\n with self.assertRaises(ValueError):\n 
client = ClientInfo(None, ip='3.3.3.3', ap_info=self.ap)\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)\n\n def test_init_wrong_ap_type(self):\n \"\"\"Test the __init__ method when mandatory parameter 'ap_info' is of wrnong type.\"\"\"\n ap_wrong = object()\n with self.assertRaises(ValueError):\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=\n ap_wrong)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=self.ap)\n self.assertEqual(str(client), 'Client cc:cc:cc:cc:cc:cc - 3.3.3.3')\n self.assertEqual(str(client), client.name)\n",
"step-3": "<mask token>\n\n\nclass APInfoTest(unittest.TestCase):\n <mask token>\n\n def test_init_correct(self):\n \"\"\"Test the __init__ method when parameters are correct.\"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n self.assertEqual(ap.ip, '2.2.2.2')\n\n def test_init_no_mac(self):\n \"\"\"Test the __init__ method when parameter 'mac' is None.\n Should raise an AttributeError.\n \"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n with self.assertRaises(ValueError):\n ap = APInfo(port_id=1, ip='2.2.2.2', mac=None, radio_mac=\n 'bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL,\n rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=\n rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip=None, mac='bb:bb:bb:bb:bb:bb', radio_mac=\n 'bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n self.assertEqual(ap.ip, None)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n self.assertEqual(str(ap), 
'APbbbb.bbbb.bbbb')\n self.assertEqual(str(ap), ap.name)\n\n\nclass ClientInfoTest(unittest.TestCase):\n \"\"\"Tests methods for the ClientInfo class.\"\"\"\n\n def setUp(self):\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n self.ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n\n def test_init_correct(self):\n \"\"\"Test the __init__ method when parameters are correct.\"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=self.ap)\n self.assertEqual(client.ip, '3.3.3.3')\n self.assertEqual(client.ip_bytes, b'\\x03\\x03\\x03\\x03')\n\n def test_init_no_mac(self):\n \"\"\"Test the __init__ method when mandatory parameter 'mac' is None.\"\"\"\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip='3.3.3.3', ap_info=self.ap)\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)\n\n def test_init_wrong_ap_type(self):\n \"\"\"Test the __init__ method when mandatory parameter 'ap_info' is of wrnong type.\"\"\"\n ap_wrong = object()\n with self.assertRaises(ValueError):\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=\n ap_wrong)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=self.ap)\n self.assertEqual(str(client), 'Client cc:cc:cc:cc:cc:cc - 3.3.3.3')\n self.assertEqual(str(client), client.name)\n",
"step-4": "<mask token>\n\n\nclass APInfoTest(unittest.TestCase):\n \"\"\"Tests methods for the APInfo class.\"\"\"\n\n def test_init_correct(self):\n \"\"\"Test the __init__ method when parameters are correct.\"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n self.assertEqual(ap.ip, '2.2.2.2')\n\n def test_init_no_mac(self):\n \"\"\"Test the __init__ method when parameter 'mac' is None.\n Should raise an AttributeError.\n \"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n with self.assertRaises(ValueError):\n ap = APInfo(port_id=1, ip='2.2.2.2', mac=None, radio_mac=\n 'bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL,\n rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=\n rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip=None, mac='bb:bb:bb:bb:bb:bb', radio_mac=\n 'bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n self.assertEqual(ap.ip, None)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n 
rsa_cert_file)\n self.assertEqual(str(ap), 'APbbbb.bbbb.bbbb')\n self.assertEqual(str(ap), ap.name)\n\n\nclass ClientInfoTest(unittest.TestCase):\n \"\"\"Tests methods for the ClientInfo class.\"\"\"\n\n def setUp(self):\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n self.ap = APInfo(port_id=1, ip='2.2.2.2', mac='bb:bb:bb:bb:bb:bb',\n radio_mac='bb:bb:bb:bb:bb:00', udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=\n rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=\n rsa_cert_file)\n\n def test_init_correct(self):\n \"\"\"Test the __init__ method when parameters are correct.\"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=self.ap)\n self.assertEqual(client.ip, '3.3.3.3')\n self.assertEqual(client.ip_bytes, b'\\x03\\x03\\x03\\x03')\n\n def test_init_no_mac(self):\n \"\"\"Test the __init__ method when mandatory parameter 'mac' is None.\"\"\"\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip='3.3.3.3', ap_info=self.ap)\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)\n\n def test_init_wrong_ap_type(self):\n \"\"\"Test the __init__ method when mandatory parameter 'ap_info' is of wrnong type.\"\"\"\n ap_wrong = object()\n with self.assertRaises(ValueError):\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=\n ap_wrong)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n client = ClientInfo('cc:cc:cc:cc:cc:cc', ip='3.3.3.3', ap_info=self.ap)\n self.assertEqual(str(client), 'Client cc:cc:cc:cc:cc:cc - 3.3.3.3')\n self.assertEqual(str(client), client.name)\n",
"step-5": "import sys\nimport os\nimport unittest\n\nfrom wireless.trex_wireless_manager import APMode\nfrom wireless.trex_wireless_manager_private import *\n\n\nclass APInfoTest(unittest.TestCase):\n \"\"\"Tests methods for the APInfo class.\"\"\"\n\n def test_init_correct(self):\n \"\"\"Test the __init__ method when parameters are correct.\"\"\"\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n self.assertEqual(ap.ip, '2.2.2.2')\n\n def test_init_no_mac(self):\n \"\"\"Test the __init__ method when parameter 'mac' is None.\n Should raise an AttributeError.\n \"\"\"\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n with self.assertRaises(ValueError):\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=None, radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=None, mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n self.assertEqual(ap.ip, None)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=\"bb:bb:bb:bb:bb:bb\", 
radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n self.assertEqual(str(ap), 'APbbbb.bbbb.bbbb')\n self.assertEqual(str(ap), ap.name)\n\n\nclass ClientInfoTest(unittest.TestCase):\n \"\"\"Tests methods for the ClientInfo class.\"\"\"\n\n def setUp(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n self.ap = APInfo(port_id=1, ip=\"2.2.2.2\", mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n\n def test_init_correct(self):\n \"\"\"Test the __init__ method when parameters are correct.\"\"\"\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=\"3.3.3.3\", ap_info=self.ap)\n self.assertEqual(client.ip, \"3.3.3.3\")\n self.assertEqual(client.ip_bytes, b'\\x03\\x03\\x03\\x03')\n\n def test_init_no_mac(self):\n \"\"\"Test the __init__ method when mandatory parameter 'mac' is None.\"\"\"\n with self.assertRaises(ValueError):\n client = ClientInfo(None, ip=\"3.3.3.3\", ap_info=self.ap)\n\n def test_init_no_ip(self):\n \"\"\"Test the __init__ method when parameter 'ip' is None.\n Since the field is optional, it should pass.\n \"\"\"\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)\n\n def test_init_wrong_ap_type(self):\n \"\"\"Test the __init__ method when mandatory parameter 'ap_info' is of wrnong type.\"\"\"\n ap_wrong = object()\n with self.assertRaises(ValueError):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\",\n ip=\"3.3.3.3\", ap_info=ap_wrong)\n\n def test_str(self):\n \"\"\"Test the __str__ method.\"\"\"\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=\"3.3.3.3\", ap_info=self.ap)\n 
self.assertEqual(str(client), \"Client cc:cc:cc:cc:cc:cc - 3.3.3.3\")\n self.assertEqual(str(client), client.name)\n",
"step-ids": [
10,
11,
13,
14,
16
]
}
|
[
10,
11,
13,
14,
16
] |
<|reserved_special_token_0|>
class MouseButton:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MouseButton:
Left = 0
Right = 1
Middle = 2
<|reserved_special_token_1|>
class ClickAction:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MouseButton:
Left = 0
Right = 1
Middle = 2
<|reserved_special_token_1|>
class ClickAction:
Click = 0
DoubleClick = 1
class MouseButton:
Left = 0
Right = 1
Middle = 2
|
flexible
|
{
"blob_id": "cabebeb5ca02da2505df4a138e8b28f74dd108fa",
"index": 4362,
"step-1": "<mask token>\n\n\nclass MouseButton:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MouseButton:\n Left = 0\n Right = 1\n Middle = 2\n",
"step-3": "class ClickAction:\n <mask token>\n <mask token>\n\n\nclass MouseButton:\n Left = 0\n Right = 1\n Middle = 2\n",
"step-4": "class ClickAction:\n Click = 0\n DoubleClick = 1\n\n\nclass MouseButton:\n Left = 0\n Right = 1\n Middle = 2\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
cassandra = {'nodes': ['localhost'], 'keyspace': 'coffee'}
|
flexible
|
{
"blob_id": "0738fc48bc367f1df75567ab97ce20d3e747dc18",
"index": 8897,
"step-1": "<mask token>\n",
"step-2": "cassandra = {'nodes': ['localhost'], 'keyspace': 'coffee'}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from bs4 import BeautifulSoup
import requests
import pymongo
client = pymongo.MongoClient('localhost', 27017)
ku = client['ku']
url_list1 = ku['url_list_index']
start_url="http://news.ccsu.cn/index.htm"
url_host="http://news.ccsu.cn/"
def get_channel_urls(url):
wb_data = requests.get(url)
wb_data.encoding = 'utf-8'
soup = BeautifulSoup(wb_data.text, "lxml")
links= soup.select("body > div.navWrap.clearfix > div > ul > li > a")
#print(links)
for link in links:
page_url =url_host + link.get("href")
url_list1.insert_one({'url': page_url})
print(page_url)
#print(link.text)
get_channel_urls(start_url)
ccsu_list = '''
http://news.ccsu.cn/index.htm
http://news.ccsu.cn/zdyw.htm
http://news.ccsu.cn/xysx.htm
http://news.ccsu.cn/mtjj.htm
http://news.ccsu.cn/xywh.htm
http://news.ccsu.cn/hdzt.htm
http://news.ccsu.cn/zdrw.htm
http://news.ccsu.cn/xbzx.htm
http://news.ccsu.cn/tzgg.htm
http://news.ccsu.cn/zlxz.htm
http://news.ccsu.cn/jxzd.htm
'''
|
normal
|
{
"blob_id": "4791b210f328dff5d48ff5afc381a98a5a1a2b7b",
"index": 1969,
"step-1": "<mask token>\n\n\ndef get_channel_urls(url):\n wb_data = requests.get(url)\n wb_data.encoding = 'utf-8'\n soup = BeautifulSoup(wb_data.text, 'lxml')\n links = soup.select('body > div.navWrap.clearfix > div > ul > li > a')\n for link in links:\n page_url = url_host + link.get('href')\n url_list1.insert_one({'url': page_url})\n print(page_url)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_channel_urls(url):\n wb_data = requests.get(url)\n wb_data.encoding = 'utf-8'\n soup = BeautifulSoup(wb_data.text, 'lxml')\n links = soup.select('body > div.navWrap.clearfix > div > ul > li > a')\n for link in links:\n page_url = url_host + link.get('href')\n url_list1.insert_one({'url': page_url})\n print(page_url)\n\n\nget_channel_urls(start_url)\n<mask token>\n",
"step-3": "<mask token>\nclient = pymongo.MongoClient('localhost', 27017)\nku = client['ku']\nurl_list1 = ku['url_list_index']\nstart_url = 'http://news.ccsu.cn/index.htm'\nurl_host = 'http://news.ccsu.cn/'\n\n\ndef get_channel_urls(url):\n wb_data = requests.get(url)\n wb_data.encoding = 'utf-8'\n soup = BeautifulSoup(wb_data.text, 'lxml')\n links = soup.select('body > div.navWrap.clearfix > div > ul > li > a')\n for link in links:\n page_url = url_host + link.get('href')\n url_list1.insert_one({'url': page_url})\n print(page_url)\n\n\nget_channel_urls(start_url)\nccsu_list = \"\"\"\nhttp://news.ccsu.cn/index.htm\nhttp://news.ccsu.cn/zdyw.htm\nhttp://news.ccsu.cn/xysx.htm\nhttp://news.ccsu.cn/mtjj.htm\nhttp://news.ccsu.cn/xywh.htm\nhttp://news.ccsu.cn/hdzt.htm\nhttp://news.ccsu.cn/zdrw.htm\nhttp://news.ccsu.cn/xbzx.htm\nhttp://news.ccsu.cn/tzgg.htm\nhttp://news.ccsu.cn/zlxz.htm\nhttp://news.ccsu.cn/jxzd.htm\n\n\"\"\"\n",
"step-4": "from bs4 import BeautifulSoup\nimport requests\nimport pymongo\nclient = pymongo.MongoClient('localhost', 27017)\nku = client['ku']\nurl_list1 = ku['url_list_index']\nstart_url = 'http://news.ccsu.cn/index.htm'\nurl_host = 'http://news.ccsu.cn/'\n\n\ndef get_channel_urls(url):\n wb_data = requests.get(url)\n wb_data.encoding = 'utf-8'\n soup = BeautifulSoup(wb_data.text, 'lxml')\n links = soup.select('body > div.navWrap.clearfix > div > ul > li > a')\n for link in links:\n page_url = url_host + link.get('href')\n url_list1.insert_one({'url': page_url})\n print(page_url)\n\n\nget_channel_urls(start_url)\nccsu_list = \"\"\"\nhttp://news.ccsu.cn/index.htm\nhttp://news.ccsu.cn/zdyw.htm\nhttp://news.ccsu.cn/xysx.htm\nhttp://news.ccsu.cn/mtjj.htm\nhttp://news.ccsu.cn/xywh.htm\nhttp://news.ccsu.cn/hdzt.htm\nhttp://news.ccsu.cn/zdrw.htm\nhttp://news.ccsu.cn/xbzx.htm\nhttp://news.ccsu.cn/tzgg.htm\nhttp://news.ccsu.cn/zlxz.htm\nhttp://news.ccsu.cn/jxzd.htm\n\n\"\"\"\n",
"step-5": "from bs4 import BeautifulSoup\nimport requests\nimport pymongo\nclient = pymongo.MongoClient('localhost', 27017)\nku = client['ku']\nurl_list1 = ku['url_list_index']\nstart_url=\"http://news.ccsu.cn/index.htm\"\nurl_host=\"http://news.ccsu.cn/\"\ndef get_channel_urls(url):\n wb_data = requests.get(url)\n wb_data.encoding = 'utf-8'\n soup = BeautifulSoup(wb_data.text, \"lxml\")\n links= soup.select(\"body > div.navWrap.clearfix > div > ul > li > a\")\n #print(links)\n for link in links:\n page_url =url_host + link.get(\"href\")\n url_list1.insert_one({'url': page_url})\n print(page_url)\n #print(link.text)\n\nget_channel_urls(start_url)\n\nccsu_list = '''\nhttp://news.ccsu.cn/index.htm\nhttp://news.ccsu.cn/zdyw.htm\nhttp://news.ccsu.cn/xysx.htm\nhttp://news.ccsu.cn/mtjj.htm\nhttp://news.ccsu.cn/xywh.htm\nhttp://news.ccsu.cn/hdzt.htm\nhttp://news.ccsu.cn/zdrw.htm\nhttp://news.ccsu.cn/xbzx.htm\nhttp://news.ccsu.cn/tzgg.htm\nhttp://news.ccsu.cn/zlxz.htm\nhttp://news.ccsu.cn/jxzd.htm\n\n'''",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
import requests
import json
url = "http://39.108.188.34:9090/spider/zhongdengdengji.go"
# url = "http://localhost:9090/spider/zhongdengdengji.go"
input = {
"timelimit": "1年",
"title": "GD20190305001",
"maincontractno": "YT20181228001",
"maincontractcurrency": "人民币",
"maincontractsum": "100000",
"description": "Y0181228001测试供应商有限公司与测试项目有限公司就SW00002-20181226-1204,转让应收账款金额100000元T2,测试供应商有限公司已出具应收账款转让通知书,对应的发票号及金额为1111/50000,5555/50000,到期日2018-12-29。付款方万科企业股份有限公司已出具编号为ZB00002-20181226-1204的付款确认及授权书",
"addDebtorList": [
{
# 金融机构
"debtorType": "企业",
"debtorName": "测试供应商有限公司",
"orgCode": "9144030068375453XL",
"businessCode": "9144030068375453XL",
"lei": "#*¥#*(&¥#(*&¥()",
"responsiblePerson": "测试法人1",
"country": "中国",
"province": "黑龙江省",
"city": "哈尔滨市",
"address": "北京天安门",
}
]
}
data = json.dumps(input)
headers = {
'User-Agent': 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
"Content-Type": "application/json"}
response = requests.post(url, data=data, headers=headers, timeout=(500, 500))
print(response.text)
# testAccount = [{'account': 'ytbl0011', 'keyword': 'ytbl0011aDmin'}]
|
normal
|
{
"blob_id": "ad024a2001dc6a6fa3a2a9c1b51f79132e914897",
"index": 7592,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(response.text)\n",
"step-3": "<mask token>\nurl = 'http://39.108.188.34:9090/spider/zhongdengdengji.go'\ninput = {'timelimit': '1年', 'title': 'GD20190305001', 'maincontractno':\n 'YT20181228001', 'maincontractcurrency': '人民币', 'maincontractsum':\n '100000', 'description':\n 'Y0181228001测试供应商有限公司与测试项目有限公司就SW00002-20181226-1204,转让应收账款金额100000元T2,测试供应商有限公司已出具应收账款转让通知书,对应的发票号及金额为1111/50000,5555/50000,到期日2018-12-29。付款方万科企业股份有限公司已出具编号为ZB00002-20181226-1204的付款确认及授权书'\n , 'addDebtorList': [{'debtorType': '企业', 'debtorName': '测试供应商有限公司',\n 'orgCode': '9144030068375453XL', 'businessCode': '9144030068375453XL',\n 'lei': '#*¥#*(&¥#(*&¥()', 'responsiblePerson': '测试法人1', 'country': '中国',\n 'province': '黑龙江省', 'city': '哈尔滨市', 'address': '北京天安门'}]}\ndata = json.dumps(input)\nheaders = {'User-Agent':\n 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'\n , 'Content-Type': 'application/json'}\nresponse = requests.post(url, data=data, headers=headers, timeout=(500, 500))\nprint(response.text)\n",
"step-4": "import requests\nimport json\nurl = 'http://39.108.188.34:9090/spider/zhongdengdengji.go'\ninput = {'timelimit': '1年', 'title': 'GD20190305001', 'maincontractno':\n 'YT20181228001', 'maincontractcurrency': '人民币', 'maincontractsum':\n '100000', 'description':\n 'Y0181228001测试供应商有限公司与测试项目有限公司就SW00002-20181226-1204,转让应收账款金额100000元T2,测试供应商有限公司已出具应收账款转让通知书,对应的发票号及金额为1111/50000,5555/50000,到期日2018-12-29。付款方万科企业股份有限公司已出具编号为ZB00002-20181226-1204的付款确认及授权书'\n , 'addDebtorList': [{'debtorType': '企业', 'debtorName': '测试供应商有限公司',\n 'orgCode': '9144030068375453XL', 'businessCode': '9144030068375453XL',\n 'lei': '#*¥#*(&¥#(*&¥()', 'responsiblePerson': '测试法人1', 'country': '中国',\n 'province': '黑龙江省', 'city': '哈尔滨市', 'address': '北京天安门'}]}\ndata = json.dumps(input)\nheaders = {'User-Agent':\n 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'\n , 'Content-Type': 'application/json'}\nresponse = requests.post(url, data=data, headers=headers, timeout=(500, 500))\nprint(response.text)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport requests\nimport json\n\nurl = \"http://39.108.188.34:9090/spider/zhongdengdengji.go\"\n# url = \"http://localhost:9090/spider/zhongdengdengji.go\"\n\ninput = {\n \"timelimit\": \"1年\",\n \"title\": \"GD20190305001\",\n \"maincontractno\": \"YT20181228001\",\n \"maincontractcurrency\": \"人民币\",\n \"maincontractsum\": \"100000\",\n \"description\": \"Y0181228001测试供应商有限公司与测试项目有限公司就SW00002-20181226-1204,转让应收账款金额100000元T2,测试供应商有限公司已出具应收账款转让通知书,对应的发票号及金额为1111/50000,5555/50000,到期日2018-12-29。付款方万科企业股份有限公司已出具编号为ZB00002-20181226-1204的付款确认及授权书\",\n \"addDebtorList\": [\n {\n # 金融机构\n \"debtorType\": \"企业\",\n \"debtorName\": \"测试供应商有限公司\",\n \"orgCode\": \"9144030068375453XL\",\n \"businessCode\": \"9144030068375453XL\",\n \"lei\": \"#*¥#*(&¥#(*&¥()\",\n \"responsiblePerson\": \"测试法人1\",\n \"country\": \"中国\",\n \"province\": \"黑龙江省\",\n \"city\": \"哈尔滨市\",\n \"address\": \"北京天安门\",\n }\n\n ]\n\n}\n\ndata = json.dumps(input)\n\nheaders = {\n 'User-Agent': 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n \"Content-Type\": \"application/json\"}\nresponse = requests.post(url, data=data, headers=headers, timeout=(500, 500))\nprint(response.text)\n\n# testAccount = [{'account': 'ytbl0011', 'keyword': 'ytbl0011aDmin'}]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import requests
import random
import boto3
from email.parser import BytesParser, Parser
from email.policy import default
##################################
endpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'
##################################
def get_msg_body(msg):
type = msg.get_content_maintype()
if type == 'multipart':
for part in msg.get_payload():
if part.get_content_maintype() == 'text':
return part.get_payload()
elif type == 'text':
return msg.get_payload()
def lambda_handler(event, context):
s3_bucket = event['Records'][0]['s3']['bucket']['name']
s3_key = event['Records'][0]['s3']['object']['key']
# s3_bucket = 'hw3-storemails'
# s3_key = '097caauj2ee2puftdrlohllf5748p70e1seovc81'
client = boto3.client('s3')
data = client.get_object(Bucket=s3_bucket, Key=s3_key)
contents = data['Body'].read()
msg = Parser(policy=default).parsestr(contents.decode('ascii'))
frm = msg['from']
to = msg['to']
time = msg['date']
subject = msg['subject']
body = get_msg_body(msg)
body = " ".join(body.split()).strip()
print(time)
r = requests.post(endpoint, data = {'data':body}, headers = {'Content-Type': 'application/x-www-form-urlencoded'})
r = json.loads(r.text)
print(r)
label = int(float(r['predicted_label']))
if label == 1:
label = 'SPAM'
else: label = 'HAM'
p = float(r['predicted_probability'])
print(label, p)
if len(body)>250: body = body[0:250]
return_msg = 'We received your email sent at ' +\
time + 'with the subject \'' + subject +\
'\'.\n\nHere is a 240 character sample of the email body:\n\n' +\
body + '\n\nThe email was categorized as ' + label +\
' with a ' + str(p) + ' % confidence.'
client = boto3.client('ses')
status = client.send_email(
Source='hamspamreply@hw3tiz2102.xyz',
Destination={
'ToAddresses': [
frm,
],
},
Message={
'Subject': {
'Data': 'Ham/Spam Analysis'
},
'Body': {
'Text': {
'Data': return_msg,
}
}
},
)
print(status)
return {
'statusCode': 200,
'body': json.dumps('LF2 successfull!')
}
|
normal
|
{
"blob_id": "cc99811321083147540a00e8029b792c8afc2ada",
"index": 3233,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_msg_body(msg):\n type = msg.get_content_maintype()\n if type == 'multipart':\n for part in msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif type == 'text':\n return msg.get_payload()\n\n\ndef lambda_handler(event, context):\n s3_bucket = event['Records'][0]['s3']['bucket']['name']\n s3_key = event['Records'][0]['s3']['object']['key']\n client = boto3.client('s3')\n data = client.get_object(Bucket=s3_bucket, Key=s3_key)\n contents = data['Body'].read()\n msg = Parser(policy=default).parsestr(contents.decode('ascii'))\n frm = msg['from']\n to = msg['to']\n time = msg['date']\n subject = msg['subject']\n body = get_msg_body(msg)\n body = ' '.join(body.split()).strip()\n print(time)\n r = requests.post(endpoint, data={'data': body}, headers={\n 'Content-Type': 'application/x-www-form-urlencoded'})\n r = json.loads(r.text)\n print(r)\n label = int(float(r['predicted_label']))\n if label == 1:\n label = 'SPAM'\n else:\n label = 'HAM'\n p = float(r['predicted_probability'])\n print(label, p)\n if len(body) > 250:\n body = body[0:250]\n return_msg = ('We received your email sent at ' + time +\n \"with the subject '\" + subject +\n \"\"\"'.\n\nHere is a 240 character sample of the email body:\n\n\"\"\" +\n body + \"\"\"\n\nThe email was categorized as \"\"\" + label + ' with a ' +\n str(p) + ' % confidence.')\n client = boto3.client('ses')\n status = client.send_email(Source='hamspamreply@hw3tiz2102.xyz',\n Destination={'ToAddresses': [frm]}, Message={'Subject': {'Data':\n 'Ham/Spam Analysis'}, 'Body': {'Text': {'Data': return_msg}}})\n print(status)\n return {'statusCode': 200, 'body': json.dumps('LF2 successfull!')}\n",
"step-3": "<mask token>\nendpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'\n\n\ndef get_msg_body(msg):\n type = msg.get_content_maintype()\n if type == 'multipart':\n for part in msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif type == 'text':\n return msg.get_payload()\n\n\ndef lambda_handler(event, context):\n s3_bucket = event['Records'][0]['s3']['bucket']['name']\n s3_key = event['Records'][0]['s3']['object']['key']\n client = boto3.client('s3')\n data = client.get_object(Bucket=s3_bucket, Key=s3_key)\n contents = data['Body'].read()\n msg = Parser(policy=default).parsestr(contents.decode('ascii'))\n frm = msg['from']\n to = msg['to']\n time = msg['date']\n subject = msg['subject']\n body = get_msg_body(msg)\n body = ' '.join(body.split()).strip()\n print(time)\n r = requests.post(endpoint, data={'data': body}, headers={\n 'Content-Type': 'application/x-www-form-urlencoded'})\n r = json.loads(r.text)\n print(r)\n label = int(float(r['predicted_label']))\n if label == 1:\n label = 'SPAM'\n else:\n label = 'HAM'\n p = float(r['predicted_probability'])\n print(label, p)\n if len(body) > 250:\n body = body[0:250]\n return_msg = ('We received your email sent at ' + time +\n \"with the subject '\" + subject +\n \"\"\"'.\n\nHere is a 240 character sample of the email body:\n\n\"\"\" +\n body + \"\"\"\n\nThe email was categorized as \"\"\" + label + ' with a ' +\n str(p) + ' % confidence.')\n client = boto3.client('ses')\n status = client.send_email(Source='hamspamreply@hw3tiz2102.xyz',\n Destination={'ToAddresses': [frm]}, Message={'Subject': {'Data':\n 'Ham/Spam Analysis'}, 'Body': {'Text': {'Data': return_msg}}})\n print(status)\n return {'statusCode': 200, 'body': json.dumps('LF2 successfull!')}\n",
"step-4": "import json\nimport requests\nimport random\nimport boto3\nfrom email.parser import BytesParser, Parser\nfrom email.policy import default\nendpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'\n\n\ndef get_msg_body(msg):\n type = msg.get_content_maintype()\n if type == 'multipart':\n for part in msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif type == 'text':\n return msg.get_payload()\n\n\ndef lambda_handler(event, context):\n s3_bucket = event['Records'][0]['s3']['bucket']['name']\n s3_key = event['Records'][0]['s3']['object']['key']\n client = boto3.client('s3')\n data = client.get_object(Bucket=s3_bucket, Key=s3_key)\n contents = data['Body'].read()\n msg = Parser(policy=default).parsestr(contents.decode('ascii'))\n frm = msg['from']\n to = msg['to']\n time = msg['date']\n subject = msg['subject']\n body = get_msg_body(msg)\n body = ' '.join(body.split()).strip()\n print(time)\n r = requests.post(endpoint, data={'data': body}, headers={\n 'Content-Type': 'application/x-www-form-urlencoded'})\n r = json.loads(r.text)\n print(r)\n label = int(float(r['predicted_label']))\n if label == 1:\n label = 'SPAM'\n else:\n label = 'HAM'\n p = float(r['predicted_probability'])\n print(label, p)\n if len(body) > 250:\n body = body[0:250]\n return_msg = ('We received your email sent at ' + time +\n \"with the subject '\" + subject +\n \"\"\"'.\n\nHere is a 240 character sample of the email body:\n\n\"\"\" +\n body + \"\"\"\n\nThe email was categorized as \"\"\" + label + ' with a ' +\n str(p) + ' % confidence.')\n client = boto3.client('ses')\n status = client.send_email(Source='hamspamreply@hw3tiz2102.xyz',\n Destination={'ToAddresses': [frm]}, Message={'Subject': {'Data':\n 'Ham/Spam Analysis'}, 'Body': {'Text': {'Data': return_msg}}})\n print(status)\n return {'statusCode': 200, 'body': json.dumps('LF2 successfull!')}\n",
"step-5": "import json\nimport requests\nimport random\nimport boto3\nfrom email.parser import BytesParser, Parser\nfrom email.policy import default\n\n##################################\nendpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'\n##################################\n\ndef get_msg_body(msg):\n type = msg.get_content_maintype()\n\n if type == 'multipart':\n for part in msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif type == 'text':\n return msg.get_payload()\n\ndef lambda_handler(event, context):\n \n s3_bucket = event['Records'][0]['s3']['bucket']['name']\n s3_key = event['Records'][0]['s3']['object']['key']\n \n# s3_bucket = 'hw3-storemails'\n# s3_key = '097caauj2ee2puftdrlohllf5748p70e1seovc81'\n \n client = boto3.client('s3')\n data = client.get_object(Bucket=s3_bucket, Key=s3_key)\n contents = data['Body'].read()\n \n msg = Parser(policy=default).parsestr(contents.decode('ascii'))\n frm = msg['from']\n to = msg['to']\n time = msg['date']\n subject = msg['subject']\n \n body = get_msg_body(msg)\n body = \" \".join(body.split()).strip()\n \n print(time)\n \n r = requests.post(endpoint, data = {'data':body}, headers = {'Content-Type': 'application/x-www-form-urlencoded'})\n r = json.loads(r.text)\n \n print(r)\n\n label = int(float(r['predicted_label']))\n if label == 1:\n label = 'SPAM'\n else: label = 'HAM'\n p = float(r['predicted_probability'])\n \n print(label, p)\n \n if len(body)>250: body = body[0:250]\n \n return_msg = 'We received your email sent at ' +\\\n time + 'with the subject \\'' + subject +\\\n '\\'.\\n\\nHere is a 240 character sample of the email body:\\n\\n' +\\\n body + '\\n\\nThe email was categorized as ' + label +\\\n ' with a ' + str(p) + ' % confidence.'\n\n client = boto3.client('ses')\n\n status = client.send_email(\n Source='hamspamreply@hw3tiz2102.xyz',\n Destination={\n 'ToAddresses': [\n frm,\n ],\n },\n Message={\n 'Subject': {\n 'Data': 'Ham/Spam 
Analysis'\n \n },\n 'Body': {\n 'Text': {\n 'Data': return_msg,\n }\n }\n },\n )\n \n print(status)\n \n return {\n 'statusCode': 200,\n 'body': json.dumps('LF2 successfull!')\n }\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# -*- coding: cp1251 -*-
import arcpy as a
from arcpy import AddMessage as msg, AddWarning as warning, AddError as error
from os import mkdir, walk
from os.path import join, dirname, basename, splitext
from glob import glob as get_files
from shutil import copy
from collections import OrderedDict
input_folder = a.GetParameterAsText(0)
output_folder = a.GetParameterAsText(1)
enable_rewrite_databases = a.GetParameterAsText(2)
enable_rewrite_tabs = a.GetParameterAsText(3)
input_folders_order = [root.replace(input_folder + '\\', '') for root, dirs, _ in walk(input_folder)]
output_folders_order = [root.replace(output_folder + '\\', '') for root, dirs, _ in walk(output_folder)]
input_folders_unordered_dict = {root.replace(input_folder + '\\', ''):dirs for root, dirs, _ in walk(input_folder)}
output_folders_unordered_dict = {root.replace(output_folder + '\\', ''):dirs for root, dirs, _ in walk(output_folder)}
input_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in input_folders_order)
output_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in output_folders_order)
msg("\nПроверка на наличие подпапок исходной папки в выходной:")
for folder in input_folders:
if folder in output_folders:
warning(' ' + folder)
else:
error(' ' + folder)
msg("\nПроверка на наличие подпапок выходной папки в исходной:")
remove_list = []
for folder in output_folders:
if folder in input_folders:
warning(' ' + folder)
else:
remove_list.append(folder)
error(' ' + folder)
for folder in remove_list:
output_folders.pop(folder, None)
msg("\nКопирование файлов в папки...")
remove_list = []
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, "*.TAB"))]
if not tab_files:
remove_list.append(subfolders)
if u"Импорт" in subfolders:
continue
else:
similar_output_folder = join(output_folder, subfolders)
msg(' ' + subfolders)
files_to_copy = [copy_file for copy_file in get_files(join(input_folder, subfolders, "*.*"))]
for file_to_copy in files_to_copy:
_, file_extension = splitext(file_to_copy)
if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID', '.IND', '.MAP']:
msg(' ' + file_to_copy)
copy(file_to_copy, similar_output_folder)
for folder in remove_list:
output_folders.pop(folder, None)
output_folders.pop('', None)
msg("\nСоздание баз данных...")
for output_subfolders in output_folders:
mdb_name = basename(output_subfolders)
mdb_local_path = join(output_subfolders, mdb_name + ".mdb")
if enable_rewrite_databases == 'true':
a.Delete_management(join(output_folder, output_subfolders, mdb_name + ".mdb"))
try:
a.CreatePersonalGDB_management(join(output_folder, output_subfolders), mdb_name + ".mdb")
msg(" " + mdb_local_path)
except a.ExecuteError:
warning(" " + mdb_local_path)
msg("\nКонвертация TAB в слои...")
layer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, "*.TAB"))]
for tab_file_path in tab_files:
for layer_type in layer_types:
tab_name = basename(tab_file_path).replace('.TAB', '')
layer_from_name = tab_name + ' ' + layer_type
layer_from = join(tab_file_path, layer_from_name)
a.Exists(layer_from)
if not a.Exists(layer_from):
continue
layer_to_name = layer_from_name.replace(' ', '_')
if layer_to_name[0].isdigit():
layer_to_name = 'L' + layer_to_name
layer_to = join(output_folder, subfolders, basename(subfolders) + '.mdb', layer_to_name)
local_tab_path = join(subfolders, tab_name + '.TAB')
if a.Exists(layer_to) and enable_rewrite_tabs == 'true':
a.Delete_management(layer_to)
msg(u' ' + local_tab_path + ' ' + layer_type)
elif a.Exists(layer_to):
warning(u' ' + local_tab_path + ' ' + layer_type)
continue
elif not a.Exists(layer_to):
msg(u' ' + local_tab_path + ' ' + layer_type)
try:
a.CopyFeatures_management(layer_from, layer_to)
except:
try:
a.CopyRows_management(layer_from, layer_to)
except Exception as e:
error(' Ошибка. Копирование объектов/строк не сработало:' + str(e))
|
normal
|
{
"blob_id": "409e0fc0b1c1d86c5526d33ba271a8387eecf748",
"index": 9872,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\n<mask token>\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\n<mask token>\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\n<mask token>\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, 
'*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-3": "<mask token>\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs,\n _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root,\n dirs, _ in walk(output_folder)]\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''): dirs for\n root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):\n dirs for root, dirs, _ in walk(output_folder)}\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in\n input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in\n output_folders_order)\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', 
'.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. 
Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-4": "import arcpy as a\nfrom arcpy import AddMessage as msg, AddWarning as warning, AddError as error\nfrom os import mkdir, walk\nfrom os.path import join, dirname, basename, splitext\nfrom glob import glob as get_files\nfrom shutil import copy\nfrom collections import OrderedDict\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs,\n _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root,\n dirs, _ in walk(output_folder)]\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''): dirs for\n root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):\n dirs for root, dirs, _ in walk(output_folder)}\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in\n input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in\n output_folders_order)\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + 
subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not 
a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-5": "# -*- coding: cp1251 -*-\nimport arcpy as a\nfrom arcpy import AddMessage as msg, AddWarning as warning, AddError as error\n\nfrom os import mkdir, walk\nfrom os.path import join, dirname, basename, splitext\nfrom glob import glob as get_files\nfrom shutil import copy\nfrom collections import OrderedDict\n\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\n\n\n\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs, _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root, dirs, _ in walk(output_folder)]\n\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''):dirs for root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):dirs for root, dirs, _ in walk(output_folder)}\n\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in output_folders_order)\n\nmsg(\"\\nПроверка на наличие подпапок исходной папки в выходной:\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\n\nmsg(\"\\nПроверка на наличие подпапок выходной папки в исходной:\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\n\nfor folder in remove_list:\n output_folders.pop(folder, None)\n\n\n\nmsg(\"\\nКопирование файлов в папки...\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, \"*.TAB\"))]\n if not tab_files:\n remove_list.append(subfolders)\n\n if u\"Импорт\" in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, 
subfolders)\n\n msg(' ' + subfolders)\n\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder, subfolders, \"*.*\"))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID', '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\n\nfor folder in remove_list:\n output_folders.pop(folder, None)\n\noutput_folders.pop('', None)\n\n\n\nmsg(\"\\nСоздание баз данных...\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + \".mdb\")\n\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name + \".mdb\"))\n\n try:\n a.CreatePersonalGDB_management(join(output_folder, output_subfolders), mdb_name + \".mdb\")\n msg(\" \" + mdb_local_path)\n except a.ExecuteError:\n warning(\" \" + mdb_local_path)\n\n\n\nmsg(\"\\nКонвертация TAB в слои...\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\n\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, \"*.TAB\"))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n\n a.Exists(layer_from)\n\n if not a.Exists(layer_from):\n continue\n\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) + '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + 
layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(' Ошибка. Копирование объектов/строк не сработало:' + str(e))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding:utf-8 -*-
import re
# A plain (literal) pattern matches exactly itself.
re_str = r'abc'
result = re.fullmatch(re_str, 'abc')
print(result)
# '.' matches any single character; one '.' consumes exactly one character.
re_str = r'a.c'
result = re.fullmatch(re_str, 'abc')
print(result)
# \w matches one letter, digit or underscore.
# Match a string of length 5 whose first two characters are word characters
# and whose last three are arbitrary. Note: in Python 3, \w also matches
# CJK (e.g. Chinese) characters.
re_str = r'\w\w...'
result = re.fullmatch(re_str, '_a123')
print(result)
# \s matches one whitespace character
# (whitespace includes the space character plus \t, \r, \n).
re_str = r'\w\w\s\w'
result = re.fullmatch(re_str, 'hj\t8')
print(result)
# \d matches one digit character.
re_str = r'\d\d\d..'
result = re.fullmatch(re_str, '082ww')
print(result)
# \b asserts a word boundary. It is zero-width (consumes no character),
# so this first pattern does NOT match 'hello world' and prints None.
re_str = r'hello\bworld'
result = re.fullmatch(re_str, 'hello world')
print(result)
re_str = r'\bhello,\bworld'
result = re.fullmatch(re_str, 'hello,world')
print(result)
# ^ anchors the match at the start of the string.
re_str = r'^The..'
result = re.fullmatch(re_str, 'The2;')
print(result)
# $ anchors the match at the end of the string.
re_str = r'The$'
result = re.fullmatch(re_str, 'The')
print(result)
# An upper-case escape is the negation of its lower-case counterpart:
# \W matches a non-word character (not a letter/digit/underscore)
# \D matches a non-digit character
# \S matches a non-whitespace character
# \B asserts a non-word-boundary position
re_str = r'\d\D\s\s\Ba'
print(re.fullmatch(re_str, '2a a'))
# Character classes
# [...] matches any single character listed inside the brackets.
re_str = r'\d[bcd]'
result = re.fullmatch(re_str, '2d')
print(result)
# [a-z]          matches any lower-case letter
# [A-Z]          matches any upper-case letter
# [a-zA-Z]       matches any letter
# [1-7]          matches a digit character from 1 to 7
# [\u4e00-\u9fa5] matches any CJK (Chinese) character
# [xy-]          a trailing '-' inside a class is a literal hyphen
re_str = r'[1-7][abc-][a-z]'
result = re.fullmatch(re_str, '3-b')
print(result)
# [^abc]  matches any single character EXCEPT a, b or c
# [^\d]   matches any non-digit character
# [^a-z]  matches any character other than a lower-case letter
# [abc^]  a non-leading '^' is literal: matches a, b, c or ^
re_str = r'[^a-z]'
result = re.fullmatch(re_str, '是')
print(result)
# 正则控制匹配次数
# *(匹配0次或者多次) a* a出现0次或多次 \d* 任意数字出现0次或多次 [abc]* a,b,c出现0次或多次 [A-F] A到F中任意字符出现0次或多次
print(re.fullmatch(r'a*b', 'b'))
# +(匹配1次或者多次)
print(re.fullmatch(r'a+b', 'aaaab'))
# ?(匹配0次或1次)
print(re.fullmatch(r'[+-]?[1-9]\d*', '+145345'))
# {N} 匹配N次 a{3} 匹配三个a
# {M,N}} 匹配M到N次
# {,N} 最多匹配N次
# {M,} 至少匹配M次
re_str = r'[a-zA-Z][a-zA-Z\d]{5,11}'
# str1 = input('请输入密码:')
str1 = 'ab123456'
result = re.fullmatch(re_str, str1)
if result:
print('密码正确')
else:
print('密码错误')
# 分之、捕获、贪婪
# 分之 条件1|条件2 匹配条件1或条件2
# \d{2}|[a-z] 匹配两个数字字符或者一个小写字母
# 正则中的分之也会出现短路,当条件1可以匹配就不会在使用条件2匹配
re_str = r'[-+]?[1-9]\d*[.]?\d*|[-+]?0[.][0-9]*[1-9]|0'
result = re.fullmatch(re_str, '0.0000009')
print(result)
# 捕获 通过正则获取符合条件的字串的时候可以在正则表达式中加括号,匹配后之获取括号里面匹配到的内容
# re.findall(正则表达式,字符串) 在字符串中获取符合正则表达式条件的所有的字串返回一个列表
str1 = 'ahs123+34asdf24'
print(re.findall(r'\d+', str1))
str2 = 'a153s123+34asfa24'
print(re.findall(r'a\d+', str2))
print(re.findall(r'a(\d+)', str2))
str3 = 'http://www.qq.com'
print(re.findall(r'^(http://)?www.(\w+).com', str3))
# 重复匹配 带多个分组的正则表达式可以在分组的后面通过添加\数字来重复前面第几个分组中匹配到的内容
re_str = r'(\d{3})([a-z]{2})a\1{2}-\2'
print(re.findall(re_str, '123efa123123-ef'))
# 贪婪 匹配次数后加?就是贪婪匹配:*?,+?,??,{M,N}?,{M,}?表示尽可能少的重复
re_str = 'a.+b'
re_str1 = 'a.+?b'
str1 = 'xxahdjbnnkhasssbkkkkk'
print(re.findall(re_str, str1))
print(re.findall(re_str1, str1))
# Escaping: a backslash turns a metacharacter into a literal.
re_str = r'a\+\(\d{2}\)'
print(re.fullmatch(re_str, 'a+(23)'))
# The re module
# re.compile pre-compiles a pattern into a reusable pattern object.
re_str = r'\d{3}'
re_obj = re.compile(re_str)
print(re_obj.fullmatch('234'))
# match: anchors only at the START of the string; returns a match object
# on success, None on failure.
# fullmatch: the whole string, start to end, must match.
re_str = r'\d([A-Z]{2})'
result = re.fullmatch(re_str, '2HKdfsd')
print(result)
result = re.match(re_str, '8KLsifdfd==')
print(result)
# Match objects
# match.start() / match.end() return the start/end index of the whole
# match; match.start(n) / match.end(n) do the same for group n.
print(result.start(), result.end())
# print(result.start(1), result.end(2))
# group: retrieve the matched text
# match.group()  returns the text matched by the whole pattern
# match.group(n) returns the text matched by group n
print(result.group())
print(result.group(1))
# match.string is the original string the match was run against.
print(result.string)
# search(pattern, string) finds the FIRST substring matching the pattern;
# returns a match object on success, None otherwise.
str1 = 'abc123hks362shjjk990kll'
result = re.search(r'\d{3}[a-z]{2}', str1)
print(result)
# split(pattern, string) splits the string on every match of the pattern.
str1 = 'ab+c7hdjd8jss-sk9s9kk*k'
result = re.split(r'\d+|[+*-]+', str1)
print(result)
# findall(pattern, string) returns all matching substrings as a list.
# FIX: renamed the variable so it no longer shadows the built-in `str`.
sample = 'abcd1235asdf'
result = re.findall(r'a[a-zA-Z]+', sample)
print(result)
# finditer(pattern, string) yields the matches one by one as an iterator.
# A hand-rolled equivalent, for illustration:
# def yt_finditer(pattern, string):
#     re1 = re.search(pattern, string)
#     while re1:
#         yield re1
#         string = string[re1.end():]
#         re1 = re.search(pattern, string)
#
# str1 = 'haja37jjkd89sdhs909nnna238==='
# result = yt_finditer(r'[a-zA-Z]{2,}(\d+)(a-z)+?', str1)
# print(next(result))
|
normal
|
{
"blob_id": "e0e00688a75021c2f8b608d4c942f5e68f6a6a48",
"index": 6282,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(re.fullmatch(re_str, '2a a'))\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\nprint(re.fullmatch('a*b', 'b'))\nprint(re.fullmatch('a+b', 'aaaab'))\nprint(re.fullmatch('[+-]?[1-9]\\\\d*', '+145345'))\n<mask token>\nif result:\n print('密码正确')\nelse:\n print('密码错误')\n<mask token>\nprint(result)\n<mask token>\nprint(re.findall('\\\\d+', str1))\n<mask token>\nprint(re.findall('a\\\\d+', str2))\nprint(re.findall('a(\\\\d+)', str2))\n<mask token>\nprint(re.findall('^(http://)?www.(\\\\w+).com', str3))\n<mask token>\nprint(re.findall(re_str, '123efa123123-ef'))\n<mask token>\nprint(re.findall(re_str, str1))\nprint(re.findall(re_str1, str1))\n<mask token>\nprint(re.fullmatch(re_str, 'a+(23)'))\n<mask token>\nprint(re_obj.fullmatch('234'))\n<mask token>\nprint(result)\n<mask token>\nprint(result)\nprint(result.start(), result.end())\nprint(result.group())\nprint(result.group(1))\nprint(result.string)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n<mask token>\nprint(result)\n",
"step-3": "<mask token>\nre_str = 'abc'\nresult = re.fullmatch(re_str, 'abc')\nprint(result)\nre_str = 'a.c'\nresult = re.fullmatch(re_str, 'abc')\nprint(result)\nre_str = '\\\\w\\\\w...'\nresult = re.fullmatch(re_str, '_a123')\nprint(result)\nre_str = '\\\\w\\\\w\\\\s\\\\w'\nresult = re.fullmatch(re_str, 'hj\\t8')\nprint(result)\nre_str = '\\\\d\\\\d\\\\d..'\nresult = re.fullmatch(re_str, '082ww')\nprint(result)\nre_str = 'hello\\\\bworld'\nresult = re.fullmatch(re_str, 'hello world')\nprint(result)\nre_str = '\\\\bhello,\\\\bworld'\nresult = re.fullmatch(re_str, 'hello,world')\nprint(result)\nre_str = '^The..'\nresult = re.fullmatch(re_str, 'The2;')\nprint(result)\nre_str = 'The$'\nresult = re.fullmatch(re_str, 'The')\nprint(result)\nre_str = '\\\\d\\\\D\\\\s\\\\s\\\\Ba'\nprint(re.fullmatch(re_str, '2a a'))\nre_str = '\\\\d[bcd]'\nresult = re.fullmatch(re_str, '2d')\nprint(result)\nre_str = '[1-7][abc-][a-z]'\nresult = re.fullmatch(re_str, '3-b')\nprint(result)\nre_str = '[^a-z]'\nresult = re.fullmatch(re_str, '是')\nprint(result)\nprint(re.fullmatch('a*b', 'b'))\nprint(re.fullmatch('a+b', 'aaaab'))\nprint(re.fullmatch('[+-]?[1-9]\\\\d*', '+145345'))\nre_str = '[a-zA-Z][a-zA-Z\\\\d]{5,11}'\nstr1 = 'ab123456'\nresult = re.fullmatch(re_str, str1)\nif result:\n print('密码正确')\nelse:\n print('密码错误')\nre_str = '[-+]?[1-9]\\\\d*[.]?\\\\d*|[-+]?0[.][0-9]*[1-9]|0'\nresult = re.fullmatch(re_str, '0.0000009')\nprint(result)\nstr1 = 'ahs123+34asdf24'\nprint(re.findall('\\\\d+', str1))\nstr2 = 'a153s123+34asfa24'\nprint(re.findall('a\\\\d+', str2))\nprint(re.findall('a(\\\\d+)', str2))\nstr3 = 'http://www.qq.com'\nprint(re.findall('^(http://)?www.(\\\\w+).com', str3))\nre_str = '(\\\\d{3})([a-z]{2})a\\\\1{2}-\\\\2'\nprint(re.findall(re_str, '123efa123123-ef'))\nre_str = 'a.+b'\nre_str1 = 'a.+?b'\nstr1 = 'xxahdjbnnkhasssbkkkkk'\nprint(re.findall(re_str, str1))\nprint(re.findall(re_str1, str1))\nre_str = 'a\\\\+\\\\(\\\\d{2}\\\\)'\nprint(re.fullmatch(re_str, 'a+(23)'))\nre_str = 
'\\\\d{3}'\nre_obj = re.compile(re_str)\nprint(re_obj.fullmatch('234'))\nre_str = '\\\\d([A-Z]{2})'\nresult = re.fullmatch(re_str, '2HKdfsd')\nprint(result)\nresult = re.match(re_str, '8KLsifdfd==')\nprint(result)\nprint(result.start(), result.end())\nprint(result.group())\nprint(result.group(1))\nprint(result.string)\nstr1 = 'abc123hks362shjjk990kll'\nresult = re.search('\\\\d{3}[a-z]{2}', str1)\nprint(result)\nstr1 = 'ab+c7hdjd8jss-sk9s9kk*k'\nresult = re.split('\\\\d+|[+*-]+', str1)\nprint(result)\nstr = 'abcd1235asdf'\nresult = re.findall('a[a-zA-Z]+', str)\nprint(result)\n",
"step-4": "import re\nre_str = 'abc'\nresult = re.fullmatch(re_str, 'abc')\nprint(result)\nre_str = 'a.c'\nresult = re.fullmatch(re_str, 'abc')\nprint(result)\nre_str = '\\\\w\\\\w...'\nresult = re.fullmatch(re_str, '_a123')\nprint(result)\nre_str = '\\\\w\\\\w\\\\s\\\\w'\nresult = re.fullmatch(re_str, 'hj\\t8')\nprint(result)\nre_str = '\\\\d\\\\d\\\\d..'\nresult = re.fullmatch(re_str, '082ww')\nprint(result)\nre_str = 'hello\\\\bworld'\nresult = re.fullmatch(re_str, 'hello world')\nprint(result)\nre_str = '\\\\bhello,\\\\bworld'\nresult = re.fullmatch(re_str, 'hello,world')\nprint(result)\nre_str = '^The..'\nresult = re.fullmatch(re_str, 'The2;')\nprint(result)\nre_str = 'The$'\nresult = re.fullmatch(re_str, 'The')\nprint(result)\nre_str = '\\\\d\\\\D\\\\s\\\\s\\\\Ba'\nprint(re.fullmatch(re_str, '2a a'))\nre_str = '\\\\d[bcd]'\nresult = re.fullmatch(re_str, '2d')\nprint(result)\nre_str = '[1-7][abc-][a-z]'\nresult = re.fullmatch(re_str, '3-b')\nprint(result)\nre_str = '[^a-z]'\nresult = re.fullmatch(re_str, '是')\nprint(result)\nprint(re.fullmatch('a*b', 'b'))\nprint(re.fullmatch('a+b', 'aaaab'))\nprint(re.fullmatch('[+-]?[1-9]\\\\d*', '+145345'))\nre_str = '[a-zA-Z][a-zA-Z\\\\d]{5,11}'\nstr1 = 'ab123456'\nresult = re.fullmatch(re_str, str1)\nif result:\n print('密码正确')\nelse:\n print('密码错误')\nre_str = '[-+]?[1-9]\\\\d*[.]?\\\\d*|[-+]?0[.][0-9]*[1-9]|0'\nresult = re.fullmatch(re_str, '0.0000009')\nprint(result)\nstr1 = 'ahs123+34asdf24'\nprint(re.findall('\\\\d+', str1))\nstr2 = 'a153s123+34asfa24'\nprint(re.findall('a\\\\d+', str2))\nprint(re.findall('a(\\\\d+)', str2))\nstr3 = 'http://www.qq.com'\nprint(re.findall('^(http://)?www.(\\\\w+).com', str3))\nre_str = '(\\\\d{3})([a-z]{2})a\\\\1{2}-\\\\2'\nprint(re.findall(re_str, '123efa123123-ef'))\nre_str = 'a.+b'\nre_str1 = 'a.+?b'\nstr1 = 'xxahdjbnnkhasssbkkkkk'\nprint(re.findall(re_str, str1))\nprint(re.findall(re_str1, str1))\nre_str = 'a\\\\+\\\\(\\\\d{2}\\\\)'\nprint(re.fullmatch(re_str, 'a+(23)'))\nre_str = 
'\\\\d{3}'\nre_obj = re.compile(re_str)\nprint(re_obj.fullmatch('234'))\nre_str = '\\\\d([A-Z]{2})'\nresult = re.fullmatch(re_str, '2HKdfsd')\nprint(result)\nresult = re.match(re_str, '8KLsifdfd==')\nprint(result)\nprint(result.start(), result.end())\nprint(result.group())\nprint(result.group(1))\nprint(result.string)\nstr1 = 'abc123hks362shjjk990kll'\nresult = re.search('\\\\d{3}[a-z]{2}', str1)\nprint(result)\nstr1 = 'ab+c7hdjd8jss-sk9s9kk*k'\nresult = re.split('\\\\d+|[+*-]+', str1)\nprint(result)\nstr = 'abcd1235asdf'\nresult = re.findall('a[a-zA-Z]+', str)\nprint(result)\n",
"step-5": "# -*- coding:utf-8 -*-\nimport re\n\n# 普通字符串 匹配本身\nre_str = r'abc'\nresult = re.fullmatch(re_str, 'abc')\nprint(result)\n# 匹配任意字符 一个.只能匹配一个字符\nre_str = r'a.c'\nresult = re.fullmatch(re_str, 'abc')\nprint(result)\n# \\w匹配字母数字或下划线\n# 匹配一个长度是5的字符串并且字符串的前两位是数字字母或者下划线后面是三个任意字符串 \\w中文也能匹配\nre_str = r'\\w\\w...'\nresult = re.fullmatch(re_str, '_a123')\nprint(result)\n# \\s匹配空白字符\n# 空白字符串包括空格,制表符,换行符:\\t,\\r,\\n\nre_str = r'\\w\\w\\s\\w'\nresult = re.fullmatch(re_str, 'hj\\t8')\nprint(result)\n# \\d匹配数字字符\nre_str = r'\\d\\d\\d..'\nresult = re.fullmatch(re_str, '082ww')\nprint(result)\n# \\b检测单词边界\nre_str = r'hello\\bworld'\nresult = re.fullmatch(re_str, 'hello world')\nprint(result)\nre_str = r'\\bhello,\\bworld'\nresult = re.fullmatch(re_str, 'hello,world')\nprint(result)\n# ^检测字符串开头\nre_str = r'^The..'\nresult = re.fullmatch(re_str, 'The2;')\nprint(result)\n# $检测字符串结尾\nre_str = r'The$'\nresult = re.fullmatch(re_str, 'The')\nprint(result)\n# \\大写字母对应的功能是\\小写字母功能取反\n# \\W 匹配非字母数字下划线\n# \\D 匹配非数字字符\n# \\S 匹配空白字符串\n# \\B 检测非单词边界\nre_str = r'\\d\\D\\s\\s\\Ba'\nprint(re.fullmatch(re_str, '2a a'))\n# 字符集\n# 匹配中括号出现的任意一个字符\nre_str = r'\\d[bcd]'\nresult = re.fullmatch(re_str, '2d')\nprint(result)\n# [a-z] 表示匹配所有的小写字母\n# [A_Z] 表示匹配所有的大写字母\n# [a-zA-Z] 匹配所有的字母\n# [1-7] 匹配数字字符1到7\n# [\\u4e00-\\u9fa5] 匹配所有的中文\n# [字符1字符2-] 这儿的-表示减号本身\nre_str = r'[1-7][abc-][a-z]'\nresult = re.fullmatch(re_str, '3-b')\nprint(result)\n# [^abc] 匹配不再abc以外的任意一个字符\n# [^\\d] 匹配除了数字字符以外的任意一个字符\n# [^a-z] 匹配除了小写字母以外的其他任意一个字符\n# [abc^] 匹配abc^中的任意一个字符\nre_str = r'[^a-z]'\nresult = re.fullmatch(re_str, '是')\nprint(result)\n# 正则控制匹配次数\n# *(匹配0次或者多次) a* a出现0次或多次 \\d* 任意数字出现0次或多次 [abc]* a,b,c出现0次或多次 [A-F] A到F中任意字符出现0次或多次\nprint(re.fullmatch(r'a*b', 'b'))\n# +(匹配1次或者多次)\nprint(re.fullmatch(r'a+b', 'aaaab'))\n# ?(匹配0次或1次)\nprint(re.fullmatch(r'[+-]?[1-9]\\d*', '+145345'))\n# {N} 匹配N次 a{3} 匹配三个a\n# {M,N}} 匹配M到N次\n# {,N} 最多匹配N次\n# {M,} 至少匹配M次\nre_str = r'[a-zA-Z][a-zA-Z\\d]{5,11}'\n# str1 = 
input('请输入密码:')\nstr1 = 'ab123456'\nresult = re.fullmatch(re_str, str1)\nif result:\n print('密码正确')\nelse:\n print('密码错误')\n\n# 分之、捕获、贪婪\n# 分之 条件1|条件2 匹配条件1或条件2\n# \\d{2}|[a-z] 匹配两个数字字符或者一个小写字母\n# 正则中的分之也会出现短路,当条件1可以匹配就不会在使用条件2匹配\nre_str = r'[-+]?[1-9]\\d*[.]?\\d*|[-+]?0[.][0-9]*[1-9]|0'\nresult = re.fullmatch(re_str, '0.0000009')\nprint(result)\n# 捕获 通过正则获取符合条件的字串的时候可以在正则表达式中加括号,匹配后之获取括号里面匹配到的内容\n# re.findall(正则表达式,字符串) 在字符串中获取符合正则表达式条件的所有的字串返回一个列表\nstr1 = 'ahs123+34asdf24'\nprint(re.findall(r'\\d+', str1))\n\nstr2 = 'a153s123+34asfa24'\nprint(re.findall(r'a\\d+', str2))\nprint(re.findall(r'a(\\d+)', str2))\n\nstr3 = 'http://www.qq.com'\nprint(re.findall(r'^(http://)?www.(\\w+).com', str3))\n# 重复匹配 带多个分组的正则表达式可以在分组的后面通过添加\\数字来重复前面第几个分组中匹配到的内容\nre_str = r'(\\d{3})([a-z]{2})a\\1{2}-\\2'\nprint(re.findall(re_str, '123efa123123-ef'))\n# 贪婪 匹配次数后加?就是贪婪匹配:*?,+?,??,{M,N}?,{M,}?表示尽可能少的重复\nre_str = 'a.+b'\nre_str1 = 'a.+?b'\nstr1 = 'xxahdjbnnkhasssbkkkkk'\nprint(re.findall(re_str, str1))\nprint(re.findall(re_str1, str1))\n# 转义字符 \\\nre_str = r'a\\+\\(\\d{2}\\)'\nprint(re.fullmatch(re_str, 'a+(23)'))\n# re模块\n# complie\nre_str = r'\\d{3}'\nre_obj = re.compile(re_str)\nprint(re_obj.fullmatch('234'))\n# match 不完全匹配之匹配字符串开头 之匹配字符串开头 匹配成功返回匹配对象匹配失败返回None\n# fullmatch 完全匹配从字符串开头匹配到字符串结束\nre_str = r'\\d([A-Z]{2})'\nresult = re.fullmatch(re_str, '2HKdfsd')\nprint(result)\nresult = re.match(re_str, '8KLsifdfd==')\nprint(result)\n# 匹配对象\n# start,end 获取匹配结果的开始下标和结束下标\n# 匹配对象.start(n)/匹配对象.end(n) 获取正则表达式中第n个分组匹配到的开始下标/结束下标\nprint(result.start(), result.end())\n# print(result.start(1), result.end(2))\n# ggroup 获取匹配到的内容\n# 匹配对象.group() 获取整个正则表达式匹配到的内容\n# 匹配对象.group(n) 获取正则表达式第n个分组匹配到的内容\nprint(result.group())\nprint(result.group(1))\n# string 获取匹配的原字符串\n# 匹配对象.string\nprint(result.string)\n# search\n# search(正则表达式,字符串)匹配字符串中第一个满足正则表达式的字串,如果匹配成功返回匹配对象否则返回None\nstr1 = 'abc123hks362shjjk990kll'\nresult = re.search(r'\\d{3}[a-z]{2}', str1)\nprint(result)\n# split split(正则表达式,字符串) 
在字符串中按照满足正则表达式条件的字串对字符串进行切割\nstr1 = 'ab+c7hdjd8jss-sk9s9kk*k'\nresult = re.split(r'\\d+|[+*-]+', str1)\nprint(result)\n# findall findall(正则表达式,字符串) 在字符串中获取满足正则表达式的所有的字符返回一个列表列表元素是字符串\nstr = 'abcd1235asdf'\nresult = re.findall(r'a[a-zA-Z]+', str)\nprint(result)\n\n\n# finditer finditer(正则表达式,字符串) 获取字符串中满足正则表达式的内容返回的是一个迭代器\n# def yt_finditer(pattern, string):\n# re1 = re.search(pattern, string)\n# while re1:\n# yield re1\n# string = string[re1.end():]\n# re1 = re.search(pattern, string)\n#\n# str1='haja37jjkd89sdhs909nnna238==='\n# result = yt_finditer(r'[a-zA-Z]{2,}(\\d+)(a-z)+?', str1)\n# print(next(result))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import input_data
import tensorflow as tf
from infogan import InfoGAN

if __name__ == '__main__':
    # Load the MNIST training set with one-hot labels; the dataset path is
    # relative to this script's working directory.
    mnist = input_data.load_mnist_dataset('../../dataset/mnist_data', one_hot=True)
    n_train = mnist.train.num_examples

    # The chosen dataset determines the flattened input size.
    dataset = 'mnist'
    if dataset == 'mnist':
        input_dim = 784  # 28 x 28 images, flattened

    # Latent-code layout (noise z plus structured codes c).
    z_dim = 16            # incompressible noise vector
    c_discrete_dim = 10   # one categorical code with 10 categories
    c_continuous_dim = 2  # two continuous codes

    num_epoch = 1000000
    batch_size = 32

    # Build the network inside a session, then run training.
    with tf.Session() as sess:
        model = InfoGAN(sess, num_epoch=num_epoch, batch_size=batch_size,
                        dataset=dataset, input_dim=input_dim, z_dim=z_dim,
                        c_discrete_dim=c_discrete_dim,
                        c_continuous_dim=c_continuous_dim)
        model.build_net()
        model.train(mnist.train, n_train)
|
normal
|
{
"blob_id": "02a28b61ad9d664c89829df019f4887c2c869f91",
"index": 6046,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n mnist_data = input_data.load_mnist_dataset('../../dataset/mnist_data',\n one_hot=True)\n num_sample = mnist_data.train.num_examples\n dataset = 'mnist'\n if dataset == 'mnist':\n input_dim = 784\n z_dim = 16\n c_discrete_dim = 10\n c_continuous_dim = 2\n num_epoch = 1000000\n batch_size = 32\n with tf.Session() as sess:\n gan = InfoGAN(sess, num_epoch=num_epoch, batch_size=batch_size,\n dataset=dataset, input_dim=input_dim, z_dim=z_dim,\n c_discrete_dim=c_discrete_dim, c_continuous_dim=c_continuous_dim)\n gan.build_net()\n gan.train(mnist_data.train, num_sample)\n",
"step-3": "import input_data\nimport tensorflow as tf\nfrom infogan import InfoGAN\nif __name__ == '__main__':\n mnist_data = input_data.load_mnist_dataset('../../dataset/mnist_data',\n one_hot=True)\n num_sample = mnist_data.train.num_examples\n dataset = 'mnist'\n if dataset == 'mnist':\n input_dim = 784\n z_dim = 16\n c_discrete_dim = 10\n c_continuous_dim = 2\n num_epoch = 1000000\n batch_size = 32\n with tf.Session() as sess:\n gan = InfoGAN(sess, num_epoch=num_epoch, batch_size=batch_size,\n dataset=dataset, input_dim=input_dim, z_dim=z_dim,\n c_discrete_dim=c_discrete_dim, c_continuous_dim=c_continuous_dim)\n gan.build_net()\n gan.train(mnist_data.train, num_sample)\n",
"step-4": "import input_data\nimport tensorflow as tf\nfrom infogan import InfoGAN\n\nif __name__ == '__main__':\n # get input data\n mnist_data = input_data.load_mnist_dataset('../../dataset/mnist_data', one_hot=True)\n num_sample = mnist_data.train.num_examples\n dataset = 'mnist'\n if dataset == 'mnist':\n input_dim = 784\n\n # define latent dimension\n z_dim = 16\n c_discrete_dim = 10\n c_continuous_dim = 2\n\n num_epoch = 1000000\n batch_size = 32\n\n # Launch the session\n with tf.Session() as sess:\n gan = InfoGAN(sess, num_epoch=num_epoch, batch_size=batch_size,\n dataset=dataset, input_dim=input_dim, z_dim=z_dim, c_discrete_dim=c_discrete_dim,\n c_continuous_dim=c_continuous_dim)\n\n # build generative adversarial network\n gan.build_net()\n\n # train the model\n gan.train(mnist_data.train, num_sample)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def crawl_bitcoin_price():
print('start crawling!')
bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)
bitcoin_prices = filter_invalid_records(bitcoin_prices)
if crawl_enabled:
Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()
else:
print('crawl paused!')
return
@app.route('/pause')
def pause():
global crawl_enabled
crawl_enabled = False
return 'PAUSED!'
<|reserved_special_token_0|>
@app.route('/')
def default():
return 'SAMPLE TRADING SYSTEM'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def crawl_bitcoin_price():
print('start crawling!')
bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)
bitcoin_prices = filter_invalid_records(bitcoin_prices)
if crawl_enabled:
Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()
else:
print('crawl paused!')
return
@app.route('/pause')
def pause():
global crawl_enabled
crawl_enabled = False
return 'PAUSED!'
@app.route('/status')
def status():
return '100%'
@app.route('/')
def default():
return 'SAMPLE TRADING SYSTEM'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def crawl_bitcoin_price():
print('start crawling!')
bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)
bitcoin_prices = filter_invalid_records(bitcoin_prices)
if crawl_enabled:
Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()
else:
print('crawl paused!')
return
@app.route('/pause')
def pause():
global crawl_enabled
crawl_enabled = False
return 'PAUSED!'
@app.route('/status')
def status():
return '100%'
@app.route('/')
def default():
return 'SAMPLE TRADING SYSTEM'
if __name__ == '__main__':
crawl_bitcoin_price()
app.run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
crawl_enabled = True
def crawl_bitcoin_price():
print('start crawling!')
bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)
bitcoin_prices = filter_invalid_records(bitcoin_prices)
if crawl_enabled:
Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()
else:
print('crawl paused!')
return
@app.route('/pause')
def pause():
global crawl_enabled
crawl_enabled = False
return 'PAUSED!'
@app.route('/status')
def status():
return '100%'
@app.route('/')
def default():
return 'SAMPLE TRADING SYSTEM'
if __name__ == '__main__':
crawl_bitcoin_price()
app.run()
<|reserved_special_token_1|>
from flask import Flask
from threading import Timer
from crypto_crawler.const import BITCOIN_CRAWLING_PERIOD_SEC, COIN_MARKET_CAP_URL
from crypto_crawler.crawler import get_web_content, filter_invalid_records

# Flask application exposing the crawler's HTTP control endpoints.
app = Flask(__name__)
# Global flag read by the crawl loop; the /pause endpoint clears it to
# stop the loop from rescheduling itself.
crawl_enabled = True
def crawl_bitcoin_price():
    """Fetch and filter current prices, then reschedule this function to
    run again after BITCOIN_CRAWLING_PERIOD_SEC while `crawl_enabled` holds.
    """
    print("start crawling!")
    records = get_web_content(COIN_MARKET_CAP_URL)
    records = filter_invalid_records(records)
    # Persistence and alerting steps are currently disabled:
    # write_many(INSERT_CRYPTO_MANY, list(map(lambda x: x.to_tuple(), records)))
    # alarm_arbitrage(records)
    # alarm_prediction()
    if not crawl_enabled:
        print("crawl paused!")
        return
    Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()
# actual crawl
@app.route("/pause")
def pause():
    """Clear the global crawl flag; the next scheduled crawl will print
    "crawl paused!" and stop rescheduling itself."""
    global crawl_enabled
    crawl_enabled = False
    return "PAUSED!"
@app.route("/status")
def status():
    """Return a fixed status string (no live health data is computed)."""
    return "100%"
@app.route("/")
def default():
    """Root endpoint: return a static banner string."""
    return "SAMPLE TRADING SYSTEM"
if __name__ == "__main__":
crawl_bitcoin_price()
app.run()
|
flexible
|
{
"blob_id": "ebbc6f9115e6b4ca7d1050a59cf175d123b6f3aa",
"index": 4871,
"step-1": "<mask token>\n\n\ndef crawl_bitcoin_price():\n print('start crawling!')\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print('crawl paused!')\n return\n\n\n@app.route('/pause')\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return 'PAUSED!'\n\n\n<mask token>\n\n\n@app.route('/')\ndef default():\n return 'SAMPLE TRADING SYSTEM'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef crawl_bitcoin_price():\n print('start crawling!')\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print('crawl paused!')\n return\n\n\n@app.route('/pause')\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return 'PAUSED!'\n\n\n@app.route('/status')\ndef status():\n return '100%'\n\n\n@app.route('/')\ndef default():\n return 'SAMPLE TRADING SYSTEM'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef crawl_bitcoin_price():\n print('start crawling!')\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print('crawl paused!')\n return\n\n\n@app.route('/pause')\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return 'PAUSED!'\n\n\n@app.route('/status')\ndef status():\n return '100%'\n\n\n@app.route('/')\ndef default():\n return 'SAMPLE TRADING SYSTEM'\n\n\nif __name__ == '__main__':\n crawl_bitcoin_price()\n app.run()\n",
"step-4": "<mask token>\napp = Flask(__name__)\ncrawl_enabled = True\n\n\ndef crawl_bitcoin_price():\n print('start crawling!')\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print('crawl paused!')\n return\n\n\n@app.route('/pause')\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return 'PAUSED!'\n\n\n@app.route('/status')\ndef status():\n return '100%'\n\n\n@app.route('/')\ndef default():\n return 'SAMPLE TRADING SYSTEM'\n\n\nif __name__ == '__main__':\n crawl_bitcoin_price()\n app.run()\n",
"step-5": "from flask import Flask\nfrom threading import Timer\n\nfrom crypto_crawler.const import BITCOIN_CRAWLING_PERIOD_SEC, COIN_MARKET_CAP_URL\nfrom crypto_crawler.crawler import get_web_content, filter_invalid_records\n\napp = Flask(__name__)\ncrawl_enabled = True\n\n\ndef crawl_bitcoin_price():\n print(\"start crawling!\")\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n # write_many(INSERT_CRYPTO_MANY, list(map(lambda x: x.to_tuple(), bitcoin_prices)))\n # alarm_arbitrage(bitcoin_prices)\n # alarm_prediction()\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print(\"crawl paused!\")\n return\n\n # actual crawl\n\n\n@app.route(\"/pause\")\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return \"PAUSED!\"\n\n\n@app.route(\"/status\")\ndef status():\n return \"100%\"\n\n\n@app.route(\"/\")\ndef default():\n return \"SAMPLE TRADING SYSTEM\"\n\n\nif __name__ == \"__main__\":\n crawl_bitcoin_price()\n app.run()\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the smslink app.

    Adds the nullable ``last_contacted`` DateTime column to the
    ``smslink_phoneuser`` table; ``backwards`` removes it again.
    """

    def forwards(self, orm):
        # Adding field 'PhoneUser.last_contacted'
        db.add_column(u'smslink_phoneuser', 'last_contacted',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'PhoneUser.last_contacted'
        db.delete_column(u'smslink_phoneuser', 'last_contacted')

    # Frozen ORM snapshot South uses to build the `orm` argument above.
    # Do not edit by hand — regenerated by `schemamigration`.
    models = {
        u'foodproviders.entryrequirement': {
            'Meta': {'object_name': 'EntryRequirement'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'requirement': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'})
        },
        u'foodproviders.postcode': {
            'Meta': {'unique_together': "(('outward', 'inward'),)", 'object_name': 'PostCode'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'inward': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
            'outward': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'})
        },
        u'smslink.phoneuser': {
            'Meta': {'object_name': 'PhoneUser'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_contacted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
            'post_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foodproviders.PostCode']", 'null': 'True', 'blank': 'True'}),
            'requirements_satisfied': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['foodproviders.EntryRequirement']", 'symmetrical': 'False'})
        }
    }

    complete_apps = ['smslink']
|
normal
|
{
"blob_id": "2c1de638ac25a9f27b1af94fa075b7c1b9df6884",
"index": 993,
"step-1": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n models = {u'foodproviders.entryrequirement': {'Meta': {'object_name':\n 'EntryRequirement'}, u'id': ('django.db.models.fields.AutoField', [\n ], {'primary_key': 'True'}), 'requirement': (\n 'django.db.models.fields.CharField', [], {'unique': 'True',\n 'max_length': '2'})}, u'foodproviders.postcode': {'Meta': {\n 'unique_together': \"(('outward', 'inward'),)\", 'object_name':\n 'PostCode'}, u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'inward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [],\n {'null': 'True', 'blank': 'True'}), 'outward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5',\n 'db_index': 'True'})}, u'smslink.phoneuser': {'Meta': {\n 'object_name': 'PhoneUser'}, u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {\n 'null': 'True', 'blank': 'True'}), 'number': (\n 'django.db.models.fields.CharField', [], {'max_length': '20',\n 'db_index': 'True'}), 'post_code': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})}}\n complete_apps = ['smslink']\n",
"step-4": "import datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n models = {u'foodproviders.entryrequirement': {'Meta': {'object_name':\n 'EntryRequirement'}, u'id': ('django.db.models.fields.AutoField', [\n ], {'primary_key': 'True'}), 'requirement': (\n 'django.db.models.fields.CharField', [], {'unique': 'True',\n 'max_length': '2'})}, u'foodproviders.postcode': {'Meta': {\n 'unique_together': \"(('outward', 'inward'),)\", 'object_name':\n 'PostCode'}, u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'inward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [],\n {'null': 'True', 'blank': 'True'}), 'outward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5',\n 'db_index': 'True'})}, u'smslink.phoneuser': {'Meta': {\n 'object_name': 'PhoneUser'}, u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {\n 'null': 'True', 'blank': 'True'}), 'number': (\n 'django.db.models.fields.CharField', [], {'max_length': '20',\n 'db_index': 'True'}), 'post_code': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})}}\n complete_apps = ['smslink']\n",
"step-5": "# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'PhoneUser.last_contacted'\n db.add_column(u'smslink_phoneuser', 'last_contacted',\n self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n\n def backwards(self, orm):\n # Deleting field 'PhoneUser.last_contacted'\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n\n\n models = {\n u'foodproviders.entryrequirement': {\n 'Meta': {'object_name': 'EntryRequirement'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'requirement': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'})\n },\n u'foodproviders.postcode': {\n 'Meta': {'unique_together': \"(('outward', 'inward'),)\", 'object_name': 'PostCode'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inward': ('django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),\n 'outward': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'})\n },\n u'smslink.phoneuser': {\n 'Meta': {'object_name': 'PhoneUser'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),\n 'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),\n 'post_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})\n }\n }\n\n complete_apps = ['smslink']",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture(scope='session')
def me(api):
return api.people.me()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pytest
import ciscosparkapi
from tests.utils import create_string
@pytest.fixture(scope='session')
def me(api):
return api.people.me()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""pytest People functions, fixtures and tests."""
import pytest
import ciscosparkapi
from tests.utils import create_string
# Helper Functions
# pytest Fixtures
@pytest.fixture(scope="session")
def me(api):
    """Session-scoped fixture: result of ``api.people.me()`` (the authenticated user)."""
    return api.people.me()
|
flexible
|
{
"blob_id": "9b7ffa2bb62a8decbec51c6bdea38b4338726816",
"index": 1891,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture(scope='session')\ndef me(api):\n return api.people.me()\n",
"step-3": "<mask token>\nimport pytest\nimport ciscosparkapi\nfrom tests.utils import create_string\n\n\n@pytest.fixture(scope='session')\ndef me(api):\n return api.people.me()\n",
"step-4": "# -*- coding: utf-8 -*-\n\n\"\"\"pytest People functions, fixtures and tests.\"\"\"\n\n\nimport pytest\n\nimport ciscosparkapi\nfrom tests.utils import create_string\n\n\n# Helper Functions\n\n\n\n\n# pytest Fixtures\n\n@pytest.fixture(scope=\"session\")\ndef me(api):\n return api.people.me()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import nltk
# Load newline-delimited JSON: each line of posts.json is one JSON object;
# the slice drops the last two characters of the line before parsing.
# NOTE(review): this assumes every line ends with exactly two extra
# characters (e.g. ",\n") — confirm against the actual file format.
with open('posts.json', 'r') as infile:
    posts = []
    for line in infile:
        posts.append(json.loads(line[0:len(line)-2]))

# Print a formatted digest of each post (Python 2 print statements).
for post in posts:
    print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print ''.join(post['title']) + ' Posted: ' + ''.join(post['posted'])
    print '\n'
    print ''.join(post['original_post_link'])
    print 'Keywords:'
    print post['keywords']
    print '\nSummary: \n'
    # Use the first ~100 tokens of the tokenized text as a crude summary.
    text = nltk.Text(post['tokenized_text'])
    print ' '.join(text[0:100]) + '\n'
|
normal
|
{
"blob_id": "2f15814d97708e33585ea6b45e89b5a5e69d82fe",
"index": 5694,
"step-1": "import json\nimport nltk\n\nwith open('posts.json', 'r') as infile:\n\tposts = []\n\tfor line in infile:\n\t\tposts.append(json.loads(line[0:len(line)-2]))\n\nfor post in posts:\n print '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\n print ''.join(post['title']) + ' Posted: ' + ''.join(post['posted'])\n print '\\n'\n print ''.join(post['original_post_link'])\n print 'Keywords:'\n print post['keywords']\n print '\\nSummary: \\n'\n text = nltk.Text(post['tokenized_text'])\n print ' '.join(text[0:100]) + '\\n'",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pandas as pd
import numpy as np
#from ctaFunction import std_normalized
def barStdNormal(bars, timeperiod=5):
    '''Rolling "normalized std" of the close series.

    bars: DataFrame-like object with a 'close' column.
    timeperiod: rolling window length (default 5).
    Returns the Series produced by applying ``std_normalized`` per window.

    NOTE(review): ``std_normalized`` is not defined or imported in this
    module — its import from ctaFunction is commented out above — so calling
    this function raises NameError as written. Restore the import or inline
    the helper before use.
    '''
    close = bars['close']
    result = close.rolling(timeperiod).apply(std_normalized)

    return result
|
normal
|
{
"blob_id": "6fa0e1dabd178507c32c62146b404bb42f8445d4",
"index": 9860,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef barStdNormal(bars, timeperiod=5):\n \"\"\"Std Normal \"\"\"\n close = bars['close']\n result = close.rolling(timeperiod).apply(std_normalized)\n return result\n",
"step-3": "import pandas as pd\nimport numpy as np\n\n\ndef barStdNormal(bars, timeperiod=5):\n \"\"\"Std Normal \"\"\"\n close = bars['close']\n result = close.rolling(timeperiod).apply(std_normalized)\n return result\n",
"step-4": "import pandas as pd \nimport numpy as np\n#from ctaFunction import std_normalized\n\ndef barStdNormal(bars, timeperiod=5):\n '''Std Normal '''\n close = bars['close']\n result = close.rolling(timeperiod).apply(std_normalized)\n\n return result",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# For each test case: print 'Yes' iff the run of ')' characters at the end
# of s is longer than n // 2, otherwise 'No'.
for _case in range(int(input())):
    n = int(input())
    s = input()
    # Length of the trailing ')' run, obtained by stripping it off the end.
    trailing = len(s) - len(s.rstrip(')'))
    if trailing > n // 2:
        print('Yes')
    else:
        print('No')
<|reserved_special_token_1|>
# For each test case: answer "Yes" iff the number of trailing ')' characters
# in s exceeds n // 2, else "No".
for _ in range(int(input())):
    n = int(input())
    s = input()
    cur = 0
    # Count the consecutive ')' characters at the end of s.
    for i in s[::-1]:
        if i==')':
            cur+=1
        else:
            break
    print("Yes") if cur>n//2 else print("No")
|
flexible
|
{
"blob_id": "31b420adebbe0d3ee6da2ed8236ece1526bdb063",
"index": 6290,
"step-1": "<mask token>\n",
"step-2": "for _ in range(int(input())):\n n = int(input())\n s = input()\n cur = 0\n for i in s[::-1]:\n if i == ')':\n cur += 1\n else:\n break\n print('Yes') if cur > n // 2 else print('No')\n",
"step-3": "for _ in range(int(input())):\r\n n = int(input())\r\n s = input()\r\n cur = 0\r\n for i in s[::-1]:\r\n if i==')':\r\n cur+=1\r\n else:\r\n break\r\n print(\"Yes\") if cur>n//2 else print(\"No\")\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = patterns('', ('^admin/', include('django.contrib.admin.urls')
), ('^polls/', include('goimcommunity.polls.urls')), ('^league/',
include('goimcommunity.leaguesystem.urls')), ('^board/', include(
'sphene.sphboard.urls')), ('^rewrite/(?P<groupName>\\w+)/board/',
include('sphene.sphboard.urls'), {'urlPrefix': ''}), (
'^rewrite/(?P<groupName>\\w+)/wiki/', include('sphene.sphwiki.urls'), {
'urlPrefix': ''}), ('^rewrite/\\w+/accounts/login/$',
'django.contrib.auth.views.login'), ('^rewrite/\\w+/accounts/logout/$',
'django.contrib.auth.views.logout'), (
'^(?P<urlPrefix>test/(?P<groupName>\\w+))/board/', include(
'sphene.sphboard.urls')), (
'^(?P<urlPrefix>test/(?P<groupName>\\w+))/wiki/', include(
'sphene.sphwiki.urls')), ('^wiki/', include('sphene.sphwiki.urls'), {
'urlPrefix': 'wiki', 'groupName': 'Sphene'}), ('^static/sphene/(.*)$',
'django.views.static.serve', {'document_root': settings.ROOT_PATH +
'/../../communitytools/static/sphene'}), ('^static/(.*)$',
'django.views.static.serve', {'document_root': settings.ROOT_PATH +
'/../static'}), ('^site_media/(.*)$', 'django.views.static.serve', {
'document_root': '/home/kahless/dev/python/diamanda/media'}), (
'^accounts/login/$', 'django.contrib.auth.views.login'), (
'^accounts/logout/$', 'django.contrib.auth.views.logout'), (
'^accounts/register/$', 'sphene.community.views.register'))
<|reserved_special_token_1|>
from django.conf.urls.defaults import *
from django.conf import settings
from django.conf.urls.defaults import *
<|reserved_special_token_0|>
urlpatterns = patterns('', ('^admin/', include('django.contrib.admin.urls')
), ('^polls/', include('goimcommunity.polls.urls')), ('^league/',
include('goimcommunity.leaguesystem.urls')), ('^board/', include(
'sphene.sphboard.urls')), ('^rewrite/(?P<groupName>\\w+)/board/',
include('sphene.sphboard.urls'), {'urlPrefix': ''}), (
'^rewrite/(?P<groupName>\\w+)/wiki/', include('sphene.sphwiki.urls'), {
'urlPrefix': ''}), ('^rewrite/\\w+/accounts/login/$',
'django.contrib.auth.views.login'), ('^rewrite/\\w+/accounts/logout/$',
'django.contrib.auth.views.logout'), (
'^(?P<urlPrefix>test/(?P<groupName>\\w+))/board/', include(
'sphene.sphboard.urls')), (
'^(?P<urlPrefix>test/(?P<groupName>\\w+))/wiki/', include(
'sphene.sphwiki.urls')), ('^wiki/', include('sphene.sphwiki.urls'), {
'urlPrefix': 'wiki', 'groupName': 'Sphene'}), ('^static/sphene/(.*)$',
'django.views.static.serve', {'document_root': settings.ROOT_PATH +
'/../../communitytools/static/sphene'}), ('^static/(.*)$',
'django.views.static.serve', {'document_root': settings.ROOT_PATH +
'/../static'}), ('^site_media/(.*)$', 'django.views.static.serve', {
'document_root': '/home/kahless/dev/python/diamanda/media'}), (
'^accounts/login/$', 'django.contrib.auth.views.login'), (
'^accounts/logout/$', 'django.contrib.auth.views.logout'), (
'^accounts/register/$', 'sphene.community.views.register'))
<|reserved_special_token_1|>
from django.conf.urls.defaults import *
#from wiki.feeds import *
from django.conf import settings
from django.conf.urls.defaults import *
# feeds for wikiPages and wikiNews
"""
feeds = {
'latestpages': LatestPages,
}
sitemaps = {
'wiki': Wiki,
}
"""
# Root URLconf for the community site.
# NOTE(review): written for a pre-1.8 Django — `patterns('')`, tuple routes,
# and string view references were all removed in later Django versions.
urlpatterns = patterns('',
    # Example:
    # (r'^goimcommunity/', include('goimcommunity.apps.foo.urls.foo')),

    # Uncomment this for admin:
    (r'^admin/', include('django.contrib.admin.urls')),

    (r'^polls/', include('goimcommunity.polls.urls')),
    (r'^league/', include('goimcommunity.leaguesystem.urls')),

    # Sphene community board/wiki, mounted under several URL prefixes.
    (r'^board/', include('sphene.sphboard.urls')),
    (r'^rewrite/(?P<groupName>\w+)/board/', include('sphene.sphboard.urls'), {'urlPrefix': '' }),
    (r'^rewrite/(?P<groupName>\w+)/wiki/', include('sphene.sphwiki.urls'), {'urlPrefix': '' }),
    (r'^rewrite/\w+/accounts/login/$', 'django.contrib.auth.views.login'),
    (r'^rewrite/\w+/accounts/logout/$', 'django.contrib.auth.views.logout' ),
    (r'^(?P<urlPrefix>test/(?P<groupName>\w+))/board/', include('sphene.sphboard.urls')),
    (r'^(?P<urlPrefix>test/(?P<groupName>\w+))/wiki/', include('sphene.sphwiki.urls')),

    (r'^wiki/', include('sphene.sphwiki.urls'), { 'urlPrefix': 'wiki', 'groupName': 'Sphene' }),

    # Static/media file serving — dev-server only, paths are machine-local.
    (r'^static/sphene/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/../../communitytools/static/sphene' }),
    (r'^static/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/../static' }),

    (r'^site_media/(.*)$', 'django.views.static.serve', {'document_root': '/home/kahless/dev/python/diamanda/media'}), # change it or remove if not on dev server

    # Authentication and registration views.
    (r'^accounts/login/$', 'django.contrib.auth.views.login'),
    (r'^accounts/logout/$','django.contrib.auth.views.logout'),
    (r'^accounts/register/$', 'sphene.community.views.register' ),

    # (r'^forum/', include('myghtyboard.URLconf')), # forum
    # (r'^muh/', 'wiki.views.show_page'), # wiki main page under /
    # (r'^wiki/', include('wiki.URLconf')), # wiki
    # (r'^wiki/feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}), # wiki feeds
    # (r'^wiki/sitemap.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}), # wikiPages sitemap
)
|
flexible
|
{
"blob_id": "f44ff7488ae8fc64bc1785fb6cbe80c4cc011fbe",
"index": 6808,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', ('^admin/', include('django.contrib.admin.urls')\n ), ('^polls/', include('goimcommunity.polls.urls')), ('^league/',\n include('goimcommunity.leaguesystem.urls')), ('^board/', include(\n 'sphene.sphboard.urls')), ('^rewrite/(?P<groupName>\\\\w+)/board/',\n include('sphene.sphboard.urls'), {'urlPrefix': ''}), (\n '^rewrite/(?P<groupName>\\\\w+)/wiki/', include('sphene.sphwiki.urls'), {\n 'urlPrefix': ''}), ('^rewrite/\\\\w+/accounts/login/$',\n 'django.contrib.auth.views.login'), ('^rewrite/\\\\w+/accounts/logout/$',\n 'django.contrib.auth.views.logout'), (\n '^(?P<urlPrefix>test/(?P<groupName>\\\\w+))/board/', include(\n 'sphene.sphboard.urls')), (\n '^(?P<urlPrefix>test/(?P<groupName>\\\\w+))/wiki/', include(\n 'sphene.sphwiki.urls')), ('^wiki/', include('sphene.sphwiki.urls'), {\n 'urlPrefix': 'wiki', 'groupName': 'Sphene'}), ('^static/sphene/(.*)$',\n 'django.views.static.serve', {'document_root': settings.ROOT_PATH +\n '/../../communitytools/static/sphene'}), ('^static/(.*)$',\n 'django.views.static.serve', {'document_root': settings.ROOT_PATH +\n '/../static'}), ('^site_media/(.*)$', 'django.views.static.serve', {\n 'document_root': '/home/kahless/dev/python/diamanda/media'}), (\n '^accounts/login/$', 'django.contrib.auth.views.login'), (\n '^accounts/logout/$', 'django.contrib.auth.views.logout'), (\n '^accounts/register/$', 'sphene.community.views.register'))\n",
"step-3": "from django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom django.conf.urls.defaults import *\n<mask token>\nurlpatterns = patterns('', ('^admin/', include('django.contrib.admin.urls')\n ), ('^polls/', include('goimcommunity.polls.urls')), ('^league/',\n include('goimcommunity.leaguesystem.urls')), ('^board/', include(\n 'sphene.sphboard.urls')), ('^rewrite/(?P<groupName>\\\\w+)/board/',\n include('sphene.sphboard.urls'), {'urlPrefix': ''}), (\n '^rewrite/(?P<groupName>\\\\w+)/wiki/', include('sphene.sphwiki.urls'), {\n 'urlPrefix': ''}), ('^rewrite/\\\\w+/accounts/login/$',\n 'django.contrib.auth.views.login'), ('^rewrite/\\\\w+/accounts/logout/$',\n 'django.contrib.auth.views.logout'), (\n '^(?P<urlPrefix>test/(?P<groupName>\\\\w+))/board/', include(\n 'sphene.sphboard.urls')), (\n '^(?P<urlPrefix>test/(?P<groupName>\\\\w+))/wiki/', include(\n 'sphene.sphwiki.urls')), ('^wiki/', include('sphene.sphwiki.urls'), {\n 'urlPrefix': 'wiki', 'groupName': 'Sphene'}), ('^static/sphene/(.*)$',\n 'django.views.static.serve', {'document_root': settings.ROOT_PATH +\n '/../../communitytools/static/sphene'}), ('^static/(.*)$',\n 'django.views.static.serve', {'document_root': settings.ROOT_PATH +\n '/../static'}), ('^site_media/(.*)$', 'django.views.static.serve', {\n 'document_root': '/home/kahless/dev/python/diamanda/media'}), (\n '^accounts/login/$', 'django.contrib.auth.views.login'), (\n '^accounts/logout/$', 'django.contrib.auth.views.logout'), (\n '^accounts/register/$', 'sphene.community.views.register'))\n",
"step-4": "from django.conf.urls.defaults import *\n#from wiki.feeds import *\nfrom django.conf import settings\n\nfrom django.conf.urls.defaults import *\n# feeds for wikiPages and wikiNews\n\"\"\"\nfeeds = {\n 'latestpages': LatestPages,\n}\n\nsitemaps = {\n\t'wiki': Wiki,\n\t}\n\"\"\"\nurlpatterns = patterns('',\n # Example:\n # (r'^goimcommunity/', include('goimcommunity.apps.foo.urls.foo')),\n\n # Uncomment this for admin:\n (r'^admin/', include('django.contrib.admin.urls')),\n\n (r'^polls/', include('goimcommunity.polls.urls')),\n\t\t (r'^league/', include('goimcommunity.leaguesystem.urls')),\n\n (r'^board/', include('sphene.sphboard.urls')),\n (r'^rewrite/(?P<groupName>\\w+)/board/', include('sphene.sphboard.urls'), {'urlPrefix': '' }),\n (r'^rewrite/(?P<groupName>\\w+)/wiki/', include('sphene.sphwiki.urls'), {'urlPrefix': '' }),\n\t\t (r'^rewrite/\\w+/accounts/login/$', 'django.contrib.auth.views.login'),\n\t\t (r'^rewrite/\\w+/accounts/logout/$', 'django.contrib.auth.views.logout' ),\n (r'^(?P<urlPrefix>test/(?P<groupName>\\w+))/board/', include('sphene.sphboard.urls')),\n (r'^(?P<urlPrefix>test/(?P<groupName>\\w+))/wiki/', include('sphene.sphwiki.urls')),\n\n (r'^wiki/', include('sphene.sphwiki.urls'), { 'urlPrefix': 'wiki', 'groupName': 'Sphene' }),\n\n\n (r'^static/sphene/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/../../communitytools/static/sphene' }),\n\t\t (r'^static/(.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_PATH + '/../static' }),\n\n\n\n (r'^site_media/(.*)$', 'django.views.static.serve', {'document_root': '/home/kahless/dev/python/diamanda/media'}), # change it or remove if not on dev server\n\n (r'^accounts/login/$', 'django.contrib.auth.views.login'),\n (r'^accounts/logout/$','django.contrib.auth.views.logout'),\n (r'^accounts/register/$', 'sphene.community.views.register' ),\n \n\n# (r'^forum/', include('myghtyboard.URLconf')), # forum\n# (r'^muh/', 'wiki.views.show_page'), # wiki 
main page under /\n# (r'^wiki/', include('wiki.URLconf')), # wiki\n# (r'^wiki/feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}), # wiki feeds\n# (r'^wiki/sitemap.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}), # wikiPages sitemap\n\n\n \n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Keychains(DeviceFeature):
<|reserved_special_token_0|>
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr', read_only=
True, doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(name='ms_keychain_attr',
read_only=True, doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self
)
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(name='te_keychain_attr',
read_only=True, doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self
)
<|reserved_special_token_0|>
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def build_config(self, devices=None, interfaces=None, links=None, apply
=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self, devices=None, interfaces=None, links=None,
apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr', read_only=
True, doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(name='ms_keychain_attr',
read_only=True, doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self
)
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(name='te_keychain_attr',
read_only=True, doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self
)
device_attr = managedattribute(name='device_attr', read_only=True, doc=
DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
key_id = managedattribute(name='key_id', default=None, type=(None,
managedattribute.test_istype(str)), doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type', default=None, type
=managedattribute.test_istype(int), doc='Set key encode type')
key_string = managedattribute(name='key_string', default=None, type=(
None, managedattribute.test_istype(str)), doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(name='crypto_algo', default=None, type=(
None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(name='lifetime_start', default=None,
type=(None, managedattribute.test_istype(str)), doc=
'Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(name='lifetime_duration', default=
None, type=(None, managedattribute.test_istype(int)), doc=
'Set key lifetime duration')
def build_config(self, devices=None, interfaces=None, links=None, apply
=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self, devices=None, interfaces=None, links=None,
apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = 'Keychains',
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr', read_only=
True, doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(name='ms_keychain_attr',
read_only=True, doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self
)
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(name='te_keychain_attr',
read_only=True, doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self
)
device_attr = managedattribute(name='device_attr', read_only=True, doc=
DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
key_id = managedattribute(name='key_id', default=None, type=(None,
managedattribute.test_istype(str)), doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type', default=None, type
=managedattribute.test_istype(int), doc='Set key encode type')
key_string = managedattribute(name='key_string', default=None, type=(
None, managedattribute.test_istype(str)), doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(name='crypto_algo', default=None, type=(
None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(name='lifetime_start', default=None,
type=(None, managedattribute.test_istype(str)), doc=
'Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(name='lifetime_duration', default=
None, type=(None, managedattribute.test_istype(int)), doc=
'Set key lifetime duration')
def build_config(self, devices=None, interfaces=None, links=None, apply
=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self, devices=None, interfaces=None, links=None,
apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
<|reserved_special_token_1|>
from enum import Enum
from genie.decorator import managedattribute
from genie.conf.base import Base, DeviceFeature, LinkFeature, Interface
import genie.conf.base.attributes
from genie.libs.conf.base.feature import consolidate_feature_args
from genie.conf.base.attributes import SubAttributes, SubAttributesDict, AttributesHelper, KeyedSubAttributes
from genie.conf.base.attributes import InterfaceSubAttributes
from genie.libs import parser
from genie.abstract import Lookup
from genie.ops.base import Base as ops_Base
from genie.ops.base import Context
__all__ = 'Keychains',
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr', read_only=
True, doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(name='ms_keychain_attr',
read_only=True, doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self
)
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(name='te_keychain_attr',
read_only=True, doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self
)
device_attr = managedattribute(name='device_attr', read_only=True, doc=
DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
key_id = managedattribute(name='key_id', default=None, type=(None,
managedattribute.test_istype(str)), doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type', default=None, type
=managedattribute.test_istype(int), doc='Set key encode type')
key_string = managedattribute(name='key_string', default=None, type=(
None, managedattribute.test_istype(str)), doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(name='crypto_algo', default=None, type=(
None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(name='lifetime_start', default=None,
type=(None, managedattribute.test_istype(str)), doc=
'Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(name='lifetime_duration', default=
None, type=(None, managedattribute.test_istype(int)), doc=
'Set key lifetime duration')
def build_config(self, devices=None, interfaces=None, links=None, apply
=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self, devices=None, interfaces=None, links=None,
apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
<|reserved_special_token_1|>
from enum import Enum
# Genie
from genie.decorator import managedattribute
from genie.conf.base import Base, \
DeviceFeature, \
LinkFeature, \
Interface
import genie.conf.base.attributes
from genie.libs.conf.base.feature import consolidate_feature_args
from genie.conf.base.attributes import SubAttributes, \
SubAttributesDict, \
AttributesHelper, \
KeyedSubAttributes
from genie.conf.base.attributes import InterfaceSubAttributes
from genie.libs import parser
from genie.abstract import Lookup
from genie.ops.base import Base as ops_Base
from genie.ops.base import Context
__all__ = ('Keychains', )
# Structure Hierarchy:
# Keychains
# +--DeviceAttributes
# +-- KeyChainAttributes
# | +-- KeyIdAttributes
# +-- KeyChainMacSecAttributes
# | +-- KeyIdAttributes
# +-- KeyChainTunEncAttributes
# +-- KeyIdAttributes
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# =============================================
# Device attributes
# =============================================
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
# KeyChainAttributes
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr',
read_only=True,
doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
# KeyChainMacSecAttributes
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(
name='ms_keychain_attr',
read_only=True,
doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes,
parent=self)
# KeyChainTunEncAttributes
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(
name='te_keychain_attr',
read_only=True,
doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes,
parent=self)
device_attr = managedattribute(name='device_attr',
read_only=True,
doc=DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
# ============ managedattributes ============#
key_id = managedattribute(name='key_id',
default=None,
type=(None, managedattribute.test_istype(str)),
doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type',
default=None,
type=managedattribute.test_istype(int),
doc='Set key encode type')
key_string = managedattribute(name='key_string',
default=None,
type=(None,
managedattribute.test_istype(str)),
doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(
name='crypto_algo',
default=None,
type=(None, CRYPTO_ALGO),
doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(
name='lifetime_start',
default=None,
type=(None, managedattribute.test_istype(str)),
doc='Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(
name='lifetime_duration',
default=None,
type=(None, managedattribute.test_istype(int)),
doc='Set key lifetime duration')
# =========================================================
# build_config
# =========================================================
def build_config(self,
devices=None,
interfaces=None,
links=None,
apply=True,
attributes=None,
**kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = \
consolidate_feature_args(self, devices, interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices,
sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self,
devices=None,
interfaces=None,
links=None,
apply=True,
attributes=None,
**kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = \
consolidate_feature_args(self, devices, interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices,
sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
|
flexible
|
{
"blob_id": "6d2581b83a2839dcbc644ca572b05b158d80b58d",
"index": 2479,
"step-1": "<mask token>\n\n\nclass Keychains(DeviceFeature):\n <mask token>\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n 
@key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n <mask token>\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n <mask token>\n <mask token>\n <mask token>\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n <mask token>\n <mask token>\n <mask token>\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-2": "<mask token>\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = 
managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set start time for sending lifetime of encryption key')\n lifetime_duration = managedattribute(name='lifetime_duration', default=\n None, type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in 
attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-3": "<mask token>\n__all__ = 'Keychains',\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = 
managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set start time for sending lifetime of encryption key')\n lifetime_duration = managedattribute(name='lifetime_duration', default=\n None, type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in 
attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-4": "from enum import Enum\nfrom genie.decorator import managedattribute\nfrom genie.conf.base import Base, DeviceFeature, LinkFeature, Interface\nimport genie.conf.base.attributes\nfrom genie.libs.conf.base.feature import consolidate_feature_args\nfrom genie.conf.base.attributes import SubAttributes, SubAttributesDict, AttributesHelper, KeyedSubAttributes\nfrom genie.conf.base.attributes import InterfaceSubAttributes\nfrom genie.libs import parser\nfrom genie.abstract import Lookup\nfrom genie.ops.base import Base as ops_Base\nfrom genie.ops.base import Context\n__all__ = 'Keychains',\n\n\nclass Keychains(DeviceFeature):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n\n class KeyChainAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n keychain_attr = managedattribute(name='keychain_attr', read_only=\n True, doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n\n class KeyChainMacSecAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n 
ms_keychain_attr = managedattribute(name='ms_keychain_attr',\n read_only=True, doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self\n )\n\n\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n\n class KeyIdAttributes(KeyedSubAttributes):\n\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n key_id_attr = managedattribute(name='key_id_attr', read_only=\n True, doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n te_keychain_attr = managedattribute(name='te_keychain_attr',\n read_only=True, doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self\n )\n device_attr = managedattribute(name='device_attr', read_only=True, doc=\n DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n key_id = managedattribute(name='key_id', default=None, type=(None,\n managedattribute.test_istype(str)), doc='Configure a key')\n key_enc_type = managedattribute(name='key_enc_type', default=None, type\n =managedattribute.test_istype(int), doc='Set key encode type')\n key_string = managedattribute(name='key_string', default=None, type=(\n None, managedattribute.test_istype(str)), doc='Set key string')\n\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n crypto_algo = managedattribute(name='crypto_algo', default=None, type=(\n None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')\n lifetime_start = managedattribute(name='lifetime_start', default=None,\n type=(None, managedattribute.test_istype(str)), doc=\n 'Set 
start time for sending lifetime of encryption key')\n lifetime_duration = managedattribute(name='lifetime_duration', default=\n None, type=(None, managedattribute.test_istype(int)), doc=\n 'Set key lifetime duration')\n\n def build_config(self, devices=None, interfaces=None, links=None, apply\n =True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self, devices=None, interfaces=None, links=None,\n apply=True, attributes=None, **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n devices, interfaces, links = consolidate_feature_args(self, devices,\n interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices, sort=True):\n cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-5": "from enum import Enum\n\n# Genie\nfrom genie.decorator import managedattribute\nfrom genie.conf.base import Base, \\\n DeviceFeature, \\\n LinkFeature, \\\n Interface\nimport genie.conf.base.attributes\nfrom genie.libs.conf.base.feature import consolidate_feature_args\nfrom genie.conf.base.attributes import SubAttributes, \\\n SubAttributesDict, \\\n AttributesHelper, \\\n KeyedSubAttributes\nfrom genie.conf.base.attributes import InterfaceSubAttributes\nfrom genie.libs import parser\nfrom genie.abstract import Lookup\nfrom genie.ops.base import Base as ops_Base\nfrom genie.ops.base import Context\n\n__all__ = ('Keychains', )\n# Structure Hierarchy:\n# Keychains\n# +--DeviceAttributes\n# +-- KeyChainAttributes\n# | +-- KeyIdAttributes\n# +-- KeyChainMacSecAttributes\n# | +-- KeyIdAttributes\n# +-- KeyChainTunEncAttributes\n# +-- KeyIdAttributes\n\n\nclass Keychains(DeviceFeature):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # =============================================\n # Device attributes\n # =============================================\n class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):\n\n # KeyChainAttributes\n class KeyChainAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n keychain_attr = managedattribute(name='keychain_attr',\n read_only=True,\n doc=KeyChainAttributes.__doc__)\n\n @keychain_attr.initter\n def keychain_attr(self):\n return SubAttributesDict(self.KeyChainAttributes, parent=self)\n\n # KeyChainMacSecAttributes\n class 
KeyChainMacSecAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.ms_key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n ms_keychain_attr = managedattribute(\n name='ms_keychain_attr',\n read_only=True,\n doc=KeyChainMacSecAttributes.__doc__)\n\n @ms_keychain_attr.initter\n def ms_keychain_attr(self):\n return SubAttributesDict(self.KeyChainMacSecAttributes,\n parent=self)\n\n # KeyChainTunEncAttributes\n class KeyChainTunEncAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.te_key_chain = key\n super().__init__(parent)\n\n # KeyIdAttributes\n class KeyIdAttributes(KeyedSubAttributes):\n def __init__(self, parent, key):\n self.key_id = key\n super().__init__(parent)\n\n key_id_attr = managedattribute(name='key_id_attr',\n read_only=True,\n doc=KeyIdAttributes.__doc__)\n\n @key_id_attr.initter\n def key_id_attr(self):\n return SubAttributesDict(self.KeyIdAttributes, parent=self)\n\n te_keychain_attr = managedattribute(\n name='te_keychain_attr',\n read_only=True,\n doc=KeyChainTunEncAttributes.__doc__)\n\n @te_keychain_attr.initter\n def te_keychain_attr(self):\n return SubAttributesDict(self.KeyChainTunEncAttributes,\n parent=self)\n\n device_attr = managedattribute(name='device_attr',\n read_only=True,\n doc=DeviceAttributes.__doc__)\n\n @device_attr.initter\n def device_attr(self):\n return SubAttributesDict(self.DeviceAttributes, parent=self)\n\n # ============ managedattributes ============#\n key_id = managedattribute(name='key_id',\n default=None,\n type=(None, managedattribute.test_istype(str)),\n doc='Configure a key')\n\n key_enc_type = 
managedattribute(name='key_enc_type',\n default=None,\n type=managedattribute.test_istype(int),\n doc='Set key encode type')\n\n key_string = managedattribute(name='key_string',\n default=None,\n type=(None,\n managedattribute.test_istype(str)),\n doc='Set key string')\n\n class CRYPTO_ALGO(Enum):\n aes_128_cmac = 'aes-128-cmac'\n aes_256_cmac = 'aes-256-cmac'\n\n crypto_algo = managedattribute(\n name='crypto_algo',\n default=None,\n type=(None, CRYPTO_ALGO),\n doc='Set cryptographic authentication algorithm')\n\n lifetime_start = managedattribute(\n name='lifetime_start',\n default=None,\n type=(None, managedattribute.test_istype(str)),\n doc='Set start time for sending lifetime of encryption key')\n\n lifetime_duration = managedattribute(\n name='lifetime_duration',\n default=None,\n type=(None, managedattribute.test_istype(int)),\n doc='Set key lifetime duration')\n\n # =========================================================\n # build_config\n # =========================================================\n def build_config(self,\n devices=None,\n interfaces=None,\n links=None,\n apply=True,\n attributes=None,\n **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n\n devices, interfaces, links = \\\n consolidate_feature_args(self, devices, interfaces, links)\n\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices,\n sort=True):\n cfgs[key] = sub.build_config(apply=False, attributes=attributes2)\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n\n def build_unconfig(self,\n devices=None,\n interfaces=None,\n links=None,\n apply=True,\n attributes=None,\n **kwargs):\n attributes = AttributesHelper(self, attributes)\n cfgs = {}\n\n devices, interfaces, links = \\\n consolidate_feature_args(self, devices, interfaces, links)\n for key, sub, attributes2 in attributes.mapping_items('device_attr',\n keys=devices,\n sort=True):\n 
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)\n\n if apply:\n for device_name, cfg in sorted(cfgs.items()):\n self.testbed.config_on_devices(cfg, fail_invalid=True)\n else:\n return cfgs\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
from django.urls import path
from . import views
urlpatterns = [
# @app.route("/")
path('', views.home),
path("teams", views.showTeams),
path("teams/new", views.new),
path("teams/<teamname>", views.showSpecificTeam),
# path("allfood", views.showAllFoodItems),
# path("team/<teamname>", views.showSpecificTeam)
]
|
normal
|
{
"blob_id": "e267108177841110493061a4f84ae3d29850d028",
"index": 1853,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.home), path('teams', views.showTeams), path(\n 'teams/new', views.new), path('teams/<teamname>', views.showSpecificTeam)]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.home), path('teams', views.showTeams), path(\n 'teams/new', views.new), path('teams/<teamname>', views.showSpecificTeam)]\n",
"step-4": "from django.urls import path \nfrom . import views\n\n\nurlpatterns = [\n # @app.route(\"/\")\n path('', views.home),\n path(\"teams\", views.showTeams),\n path(\"teams/new\", views.new),\n path(\"teams/<teamname>\", views.showSpecificTeam),\n # path(\"allfood\", views.showAllFoodItems),\n # path(\"team/<teamname>\", views.showSpecificTeam) \n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import itertools
def permutations(string):
return list("".join(p) for p in set(itertools.permutations(string)))
|
normal
|
{
"blob_id": "3d49d03dbc38ee37eadd603b4b464b0e2e1a33d5",
"index": 5280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef permutations(string):\n return list(''.join(p) for p in set(itertools.permutations(string)))\n",
"step-3": "import itertools\n\n\ndef permutations(string):\n return list(''.join(p) for p in set(itertools.permutations(string)))\n",
"step-4": "import itertools\n\ndef permutations(string):\n return list(\"\".join(p) for p in set(itertools.permutations(string)))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#Exercise 5
#Define with names stair1, stair2, and stair3 (from bottom up to top), and insert within the building model, the 3 stair models of the building.
|
normal
|
{
"blob_id": "4c42bad4197b51be0e9d18307c7b954a29281fe1",
"index": 3259,
"step-1": "#Exercise 5\n#Define with names stair1, stair2, and stair3 (from bottom up to top), and insert within the building model, the 3 stair models of the building.",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
def test_keywordbid_rule_init(kwb_rule, account):
assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000
assert kwb_rule.get_bid_increase_percentage_display(
) == kwb_rule.bid_increase_percentage / 100
assert kwb_rule.get_target_bid_diff_display(
) == kwb_rule.target_bid_diff / 100
assert kwb_rule.account is account
assert kwb_rule.target_values == [1, 2, 3]
assert kwb_rule.get_target_type_display() in map(lambda t: t[1],
constants.KEYWORD_BID_TARGET_TYPES)
<|reserved_special_token_0|>
def test_map_keywordbid_rule(kwb_rule, account):
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
assert isinstance(kwb_ent, entities.KeywordBidRule)
assert kwb_ent.account == account.id
for f in kwb_rule._meta.fields:
if f.name in ('id', 'title'):
continue
model_attr = getattr(kwb_rule, f.name)
ent_attr = getattr(kwb_ent, f.name)
if not hasattr(model_attr, 'pk'):
try:
assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()
except AttributeError:
assert ent_attr == model_attr
else:
assert ent_attr == model_attr.id
<|reserved_special_token_0|>
def test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,
keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=keyword_bids)
mock.add(method='POST', url=url, status=200, json=
keyword_bids_w_warnings)
mock.add(method='POST', url=url, status=200, json={'error': {
'error_code': 0, 'error_message': 'oops!'}})
result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,
kwb_ent, selection_criteria={'CampaignIds': []})
assert len(result) == 1514
with pytest.raises(UnExpectedResult):
controllers.keyword_bids.calculate_keyword_bids(yd_gateway,
kwb_ent, selection_criteria={'CampaignIds': []})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_keywordbid_rule_init(kwb_rule, account):
assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000
assert kwb_rule.get_bid_increase_percentage_display(
) == kwb_rule.bid_increase_percentage / 100
assert kwb_rule.get_target_bid_diff_display(
) == kwb_rule.target_bid_diff / 100
assert kwb_rule.account is account
assert kwb_rule.target_values == [1, 2, 3]
assert kwb_rule.get_target_type_display() in map(lambda t: t[1],
constants.KEYWORD_BID_TARGET_TYPES)
<|reserved_special_token_0|>
def test_map_keywordbid_rule(kwb_rule, account):
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
assert isinstance(kwb_ent, entities.KeywordBidRule)
assert kwb_ent.account == account.id
for f in kwb_rule._meta.fields:
if f.name in ('id', 'title'):
continue
model_attr = getattr(kwb_rule, f.name)
ent_attr = getattr(kwb_ent, f.name)
if not hasattr(model_attr, 'pk'):
try:
assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()
except AttributeError:
assert ent_attr == model_attr
else:
assert ent_attr == model_attr.id
<|reserved_special_token_0|>
def test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][
'KeywordBids'])
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=
keyword_bids_w_warnings)
response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)
assert len(list(response)) == 1514
def test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,
keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=keyword_bids)
mock.add(method='POST', url=url, status=200, json=
keyword_bids_w_warnings)
mock.add(method='POST', url=url, status=200, json={'error': {
'error_code': 0, 'error_message': 'oops!'}})
result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,
kwb_ent, selection_criteria={'CampaignIds': []})
assert len(result) == 1514
with pytest.raises(UnExpectedResult):
controllers.keyword_bids.calculate_keyword_bids(yd_gateway,
kwb_ent, selection_criteria={'CampaignIds': []})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_keywordbid_rule_init(kwb_rule, account):
assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000
assert kwb_rule.get_bid_increase_percentage_display(
) == kwb_rule.bid_increase_percentage / 100
assert kwb_rule.get_target_bid_diff_display(
) == kwb_rule.target_bid_diff / 100
assert kwb_rule.account is account
assert kwb_rule.target_values == [1, 2, 3]
assert kwb_rule.get_target_type_display() in map(lambda t: t[1],
constants.KEYWORD_BID_TARGET_TYPES)
def test_make_keywordbid_rule(kwb_rule):
kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)
assert kwb_rule == kw_bid_rule
assert kw_bid_rule.account == kwb_rule.account
not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)
assert not_found_kwb_rule is None
def test_map_keywordbid_rule(kwb_rule, account):
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
assert isinstance(kwb_ent, entities.KeywordBidRule)
assert kwb_ent.account == account.id
for f in kwb_rule._meta.fields:
if f.name in ('id', 'title'):
continue
model_attr = getattr(kwb_rule, f.name)
ent_attr = getattr(kwb_ent, f.name)
if not hasattr(model_attr, 'pk'):
try:
assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()
except AttributeError:
assert ent_attr == model_attr
else:
assert ent_attr == model_attr.id
def test_get_keyword_bids(yd_gateway, keyword_bids):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
data = keyword_bids
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=data)
mock.add(method='POST', url=url, status=404)
mock.add(method='POST', url=url, status=200, json=data)
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,
selection_criteria={'CampaignIds': []})
assert next(kwb).keyword_id == 13102117581
assert next(kwb).keyword_id == 13102117582
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,
selection_criteria={'CampaignIds': []})
with pytest.raises(UnExpectedResult):
next(kwb)
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,
selection_criteria={'CampaignIds': []})
assert type(next(kwb).as_dict()) is dict
def test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][
'KeywordBids'])
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=
keyword_bids_w_warnings)
response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)
assert len(list(response)) == 1514
def test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,
keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=keyword_bids)
mock.add(method='POST', url=url, status=200, json=
keyword_bids_w_warnings)
mock.add(method='POST', url=url, status=200, json={'error': {
'error_code': 0, 'error_message': 'oops!'}})
result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,
kwb_ent, selection_criteria={'CampaignIds': []})
assert len(result) == 1514
with pytest.raises(UnExpectedResult):
controllers.keyword_bids.calculate_keyword_bids(yd_gateway,
kwb_ent, selection_criteria={'CampaignIds': []})
<|reserved_special_token_1|>
import pytest
import responses
from auctioneer import constants, controllers, entities
from common.http import UnExpectedResult
def test_keywordbid_rule_init(kwb_rule, account):
assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000
assert kwb_rule.get_bid_increase_percentage_display(
) == kwb_rule.bid_increase_percentage / 100
assert kwb_rule.get_target_bid_diff_display(
) == kwb_rule.target_bid_diff / 100
assert kwb_rule.account is account
assert kwb_rule.target_values == [1, 2, 3]
assert kwb_rule.get_target_type_display() in map(lambda t: t[1],
constants.KEYWORD_BID_TARGET_TYPES)
def test_make_keywordbid_rule(kwb_rule):
kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)
assert kwb_rule == kw_bid_rule
assert kw_bid_rule.account == kwb_rule.account
not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)
assert not_found_kwb_rule is None
def test_map_keywordbid_rule(kwb_rule, account):
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
assert isinstance(kwb_ent, entities.KeywordBidRule)
assert kwb_ent.account == account.id
for f in kwb_rule._meta.fields:
if f.name in ('id', 'title'):
continue
model_attr = getattr(kwb_rule, f.name)
ent_attr = getattr(kwb_ent, f.name)
if not hasattr(model_attr, 'pk'):
try:
assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()
except AttributeError:
assert ent_attr == model_attr
else:
assert ent_attr == model_attr.id
def test_get_keyword_bids(yd_gateway, keyword_bids):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
data = keyword_bids
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=data)
mock.add(method='POST', url=url, status=404)
mock.add(method='POST', url=url, status=200, json=data)
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,
selection_criteria={'CampaignIds': []})
assert next(kwb).keyword_id == 13102117581
assert next(kwb).keyword_id == 13102117582
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,
selection_criteria={'CampaignIds': []})
with pytest.raises(UnExpectedResult):
next(kwb)
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,
selection_criteria={'CampaignIds': []})
assert type(next(kwb).as_dict()) is dict
def test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][
'KeywordBids'])
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=
keyword_bids_w_warnings)
response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)
assert len(list(response)) == 1514
def test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,
keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=keyword_bids)
mock.add(method='POST', url=url, status=200, json=
keyword_bids_w_warnings)
mock.add(method='POST', url=url, status=200, json={'error': {
'error_code': 0, 'error_message': 'oops!'}})
result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,
kwb_ent, selection_criteria={'CampaignIds': []})
assert len(result) == 1514
with pytest.raises(UnExpectedResult):
controllers.keyword_bids.calculate_keyword_bids(yd_gateway,
kwb_ent, selection_criteria={'CampaignIds': []})
<|reserved_special_token_1|>
import pytest
import responses
from auctioneer import constants, controllers, entities
from common.http import UnExpectedResult
def test_keywordbid_rule_init(kwb_rule, account):
assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1_000_000
assert kwb_rule.get_bid_increase_percentage_display() == kwb_rule.bid_increase_percentage / 100
assert kwb_rule.get_target_bid_diff_display() == kwb_rule.target_bid_diff / 100
assert kwb_rule.account is account
assert kwb_rule.target_values == [1,2,3]
assert kwb_rule.get_target_type_display() in map(lambda t: t[1], constants.KEYWORD_BID_TARGET_TYPES)
def test_make_keywordbid_rule(kwb_rule):
kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)
assert kwb_rule == kw_bid_rule
assert kw_bid_rule.account == kwb_rule.account
not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)
assert not_found_kwb_rule is None
def test_map_keywordbid_rule(kwb_rule, account):
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
assert isinstance(kwb_ent, entities.KeywordBidRule)
assert kwb_ent.account == account.id
for f in kwb_rule._meta.fields:
if f.name in ('id', 'title') :
continue
model_attr = getattr(kwb_rule, f.name)
ent_attr = getattr(kwb_ent, f.name)
if not hasattr(model_attr, 'pk'):
try:
assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()
except AttributeError:
assert ent_attr == model_attr
else:
assert ent_attr == model_attr.id
def test_get_keyword_bids(yd_gateway, keyword_bids):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
data = keyword_bids
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=data)
mock.add(method='POST', url=url, status=404)
mock.add(method='POST', url=url, status=200, json=data)
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={"CampaignIds": []})
assert next(kwb).keyword_id == 13102117581
assert next(kwb).keyword_id == 13102117582
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={"CampaignIds": []})
with pytest.raises(UnExpectedResult):
next(kwb)
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={"CampaignIds": []})
assert type(next(kwb).as_dict()) is dict
def test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result']['KeywordBids'])
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)
response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)
assert len(list(response)) == 1514
def test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids, keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=keyword_bids)
mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)
mock.add(method='POST', url=url, status=200, json={'error': {'error_code': 0000, 'error_message': 'oops!'}})
result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,
selection_criteria={"CampaignIds": []})
assert len(result) == 1514
with pytest.raises(UnExpectedResult):
controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,
selection_criteria={"CampaignIds": []})
|
flexible
|
{
"blob_id": "e0435b0b34fc011e7330ab8882865131f7f78882",
"index": 922,
"step-1": "<mask token>\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\n<mask token>\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\n<mask token>\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"step-2": "<mask token>\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\n<mask token>\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\n<mask token>\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n 
mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"step-3": "<mask token>\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = 
controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"step-4": "import pytest\nimport responses\nfrom auctioneer import constants, controllers, entities\nfrom common.http import UnExpectedResult\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': 
[]})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"step-5": "import pytest\nimport responses\n\nfrom auctioneer import constants, controllers, entities\nfrom common.http import UnExpectedResult\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1_000_000\n assert kwb_rule.get_bid_increase_percentage_display() == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display() == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1,2,3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1], constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title') :\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n 
assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result']['KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {'error_code': 0000, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,\n selection_criteria={\"CampaignIds\": []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,\n selection_criteria={\"CampaignIds\": []})\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(dataset.info())
<|reserved_special_token_0|>
regressor.fit(features, labels)
<|reserved_special_token_0|>
regressor.predict(x)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataset = pd.read_csv('University_data.csv')
print(dataset.info())
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, -1:].values
<|reserved_special_token_0|>
labelencoder = LabelEncoder()
features[:, 0] = labelencoder.fit_transform(features[:, 0])
<|reserved_special_token_0|>
onehotencoder = OneHotEncoder(categorical_features=[0])
features = onehotencoder.fit_transform(features).toarray()
features = features[:, 1:]
<|reserved_special_token_0|>
regressor = LinearRegression()
regressor.fit(features, labels)
x = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]
x = np.array(x).reshape(1, -1)
x[:, 0] = labelencoder.transform(x[:, 0])
x = onehotencoder.transform(x).toarray()
x = x[:, 1:]
regressor.predict(x)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import pandas as pd
dataset = pd.read_csv('University_data.csv')
print(dataset.info())
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, -1:].values
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
features[:, 0] = labelencoder.fit_transform(features[:, 0])
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features=[0])
features = onehotencoder.fit_transform(features).toarray()
features = features[:, 1:]
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(features, labels)
x = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]
x = np.array(x).reshape(1, -1)
x[:, 0] = labelencoder.transform(x[:, 0])
x = onehotencoder.transform(x).toarray()
x = x[:, 1:]
regressor.predict(x)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 17:38:50 2019
@author: User
"""
import numpy as np
import pandas as pd
dataset = pd.read_csv('University_data.csv')
print(dataset.info())
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, -1:].values
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
features[:, 0] = labelencoder.fit_transform(features[:, 0])
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features = [0])
features = onehotencoder.fit_transform(features).toarray()
features = features[:, 1:]
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(features, labels)
x = ["Cabrini",337,1.5,2.3,9.0,0]
x = np.array(x).reshape(1,-1)
x[:,0] = labelencoder.transform(x[:,0])
x = onehotencoder.transform(x).toarray()
x = x[:,1:]
regressor.predict(x)
|
flexible
|
{
"blob_id": "94e8f0532da76c803b23fe2217b07dc8cf285710",
"index": 950,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dataset.info())\n<mask token>\nregressor.fit(features, labels)\n<mask token>\nregressor.predict(x)\n",
"step-3": "<mask token>\ndataset = pd.read_csv('University_data.csv')\nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values\nlabels = dataset.iloc[:, -1:].values\n<mask token>\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\n<mask token>\nonehotencoder = OneHotEncoder(categorical_features=[0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\n<mask token>\nregressor = LinearRegression()\nregressor.fit(features, labels)\nx = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]\nx = np.array(x).reshape(1, -1)\nx[:, 0] = labelencoder.transform(x[:, 0])\nx = onehotencoder.transform(x).toarray()\nx = x[:, 1:]\nregressor.predict(x)\n",
"step-4": "<mask token>\nimport numpy as np\nimport pandas as pd\ndataset = pd.read_csv('University_data.csv')\nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values\nlabels = dataset.iloc[:, -1:].values\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\nfrom sklearn.preprocessing import OneHotEncoder\nonehotencoder = OneHotEncoder(categorical_features=[0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(features, labels)\nx = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]\nx = np.array(x).reshape(1, -1)\nx[:, 0] = labelencoder.transform(x[:, 0])\nx = onehotencoder.transform(x).toarray()\nx = x[:, 1:]\nregressor.predict(x)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 27 17:38:50 2019\n\n@author: User\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\ndataset = pd.read_csv('University_data.csv') \nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values \nlabels = dataset.iloc[:, -1:].values \nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\nfrom sklearn.preprocessing import OneHotEncoder\nonehotencoder = OneHotEncoder(categorical_features = [0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\n\n\nfrom sklearn.linear_model import LinearRegression \nregressor = LinearRegression() \nregressor.fit(features, labels)\n\n\n\nx = [\"Cabrini\",337,1.5,2.3,9.0,0]\nx = np.array(x).reshape(1,-1)\nx[:,0] = labelencoder.transform(x[:,0])\nx = onehotencoder.transform(x).toarray()\nx = x[:,1:]\nregressor.predict(x)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
necessary bc MTC-ANN indexing doesn't count
"""
numNotes = inEnd - inStart + 1
allNotes = songs[songName].score.flat.notes.stream()
if useTies:
beforeSlice = allNotes[:inStart - 1]
numTies = 0
for n in beforeSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
inStart += numTies
numTies = 0
inSlice = allNotes[inStart:inStart + numNotes]
for n in inSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
numNotes += numTies
pattOcc = allNotes[inStart:inStart + numNotes]
return pattOcc
<|reserved_special_token_0|>
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest - lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(
noteNums) - 1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
if all([(np.sign(x) == 1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([(np.sign(x) == -1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
yCoords = [(y - noteNums[0]) for y in noteNums]
xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]
xCoords = [(x - xtemp[0]) for x in xtemp]
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
zeros = [(0) for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = ['interval_mean', 'rhythm_variability',
'rhythm_density', 'interval_signs', 'pitch_mean',
'interval_prop_small', 'interval_prop_large']
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0
vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n -
1]) for n in range(1, len(noteBeats))]) > 0
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])
except m21.Music21ObjectException:
pass
return vec
<|reserved_special_token_0|>
def inspectFeature(featureName, table, tableNames, featsType='classFeatures'):
ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
necessary bc MTC-ANN indexing doesn't count
"""
numNotes = inEnd - inStart + 1
allNotes = songs[songName].score.flat.notes.stream()
if useTies:
beforeSlice = allNotes[:inStart - 1]
numTies = 0
for n in beforeSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
inStart += numTies
numTies = 0
inSlice = allNotes[inStart:inStart + numNotes]
for n in inSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
numNotes += numTies
pattOcc = allNotes[inStart:inStart + numNotes]
return pattOcc
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest - lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(
noteNums) - 1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
if all([(np.sign(x) == 1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([(np.sign(x) == -1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
yCoords = [(y - noteNums[0]) for y in noteNums]
xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]
xCoords = [(x - xtemp[0]) for x in xtemp]
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
zeros = [(0) for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = ['interval_mean', 'rhythm_variability',
'rhythm_density', 'interval_signs', 'pitch_mean',
'interval_prop_small', 'interval_prop_large']
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0
vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n -
1]) for n in range(1, len(noteBeats))]) > 0
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])
except m21.Music21ObjectException:
pass
return vec
def getFeaturesForClasses(patternClass, occs, songs):
vec = {}
vec['numOccs'] = len(patternClass.occNames)
occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
for fk in occFeatureKeys:
allOccVals = [occs[occName].occFeatures[fk] for occName in
patternClass.occNames]
vec['avg_' + fk] = np.mean(allOccVals)
vec['std_' + fk] = np.std(allOccVals)
scores = [occs[oc].score.flat for oc in patternClass.occNames]
noteNums = [[x.pitch.midi for x in mel] for mel in scores]
noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in
mel] for mel in scores]
flatNums = [x for subList in noteNums for x in subList]
vec['num_notes_total'] = len(flatNums)
vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)
) / vec['numOccs']
vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)
) / vec['numOccs']
pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[
'numOccs'])]
vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)
) / vec['numOccs']
return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,
pClasses, pOccs):
indexPairs = np.arange(len(annPClassNames))
indexPairs = np.concatenate([indexPairs, indexPairs])
np.random.shuffle(indexPairs)
indexPairs = np.split(indexPairs, len(indexPairs) / 2)
genPClassNamesCopy = list(genPClassNames)
filtGenPClassNames = []
for i in range(len(annPClassNames)):
tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
tarNumOccs = len(tar1.occNames)
tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
tarNumNotes = np.mean(tar2Notes)
candidateNameList = []
for gcn in genPClassNamesCopy:
cand = pClasses[gcn]
candNumOccs = len(cand.occNames)
candNotes = [len(pOccs[on].score) for on in cand.occNames]
candNumNotes = np.mean(candNotes)
candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -
tarNumNotes) ** 2
candidateNameList.append([candScore, gcn])
candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
chop = candidateNameList[0:kNearest]
choice = chop[np.random.choice(kNearest)][1]
filtGenPClassNames.append(choice)
genPClassNamesCopy.remove(choice)
return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
chunk_len = int(np.floor(len(inp) / num_chunks))
chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
if len(chunks) > num_chunks:
for i, x in enumerate(chunks[num_chunks]):
chunks[i].append(x)
del chunks[num_chunks]
return chunks
def inspectFeature(featureName, table, tableNames, featsType='classFeatures'):
ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret
def scatterFeatures(fn1, fn2, table, tableNames):
xs = []
ys = []
types = []
for tn in tableNames:
item = table[tn]
xs.append(item.classFeatures[fn1])
ys.append(item.classFeatures[fn2])
if item['type'] == 'ann':
types.append('r')
else:
types.append('k')
print(types)
plt.scatter(xs, ys, c=types)
plt.xlabel(fn1)
plt.ylabel(fn2)
plt.show()
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROUND_DURS_DIGITS = 5
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
necessary bc MTC-ANN indexing doesn't count
"""
numNotes = inEnd - inStart + 1
allNotes = songs[songName].score.flat.notes.stream()
if useTies:
beforeSlice = allNotes[:inStart - 1]
numTies = 0
for n in beforeSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
inStart += numTies
numTies = 0
inSlice = allNotes[inStart:inStart + numNotes]
for n in inSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
numNotes += numTies
pattOcc = allNotes[inStart:inStart + numNotes]
return pattOcc
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest - lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(
noteNums) - 1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
if all([(np.sign(x) == 1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([(np.sign(x) == -1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
yCoords = [(y - noteNums[0]) for y in noteNums]
xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]
xCoords = [(x - xtemp[0]) for x in xtemp]
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
zeros = [(0) for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = ['interval_mean', 'rhythm_variability',
'rhythm_density', 'interval_signs', 'pitch_mean',
'interval_prop_small', 'interval_prop_large']
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0
vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n -
1]) for n in range(1, len(noteBeats))]) > 0
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])
except m21.Music21ObjectException:
pass
return vec
def getFeaturesForClasses(patternClass, occs, songs):
vec = {}
vec['numOccs'] = len(patternClass.occNames)
occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
for fk in occFeatureKeys:
allOccVals = [occs[occName].occFeatures[fk] for occName in
patternClass.occNames]
vec['avg_' + fk] = np.mean(allOccVals)
vec['std_' + fk] = np.std(allOccVals)
scores = [occs[oc].score.flat for oc in patternClass.occNames]
noteNums = [[x.pitch.midi for x in mel] for mel in scores]
noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in
mel] for mel in scores]
flatNums = [x for subList in noteNums for x in subList]
vec['num_notes_total'] = len(flatNums)
vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)
) / vec['numOccs']
vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)
) / vec['numOccs']
pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[
'numOccs'])]
vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)
) / vec['numOccs']
return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,
    pClasses, pOccs):
    """Sample one generated pattern class per annotated class, size-matched.

    For each annotated class, picks (without replacement) a generated class
    whose (numOccs, mean notes per occurrence) is close to a random pair of
    annotated targets, choosing uniformly among the kNearest best matches.
    Uses np.random, so results depend on the global RNG state.
    """
    # Pair up the annotated class indices randomly: each pair supplies one
    # occurrence-count target (tar1) and one note-count target (tar2).
    indexPairs = np.arange(len(annPClassNames))
    indexPairs = np.concatenate([indexPairs, indexPairs])
    np.random.shuffle(indexPairs)
    indexPairs = np.split(indexPairs, len(indexPairs) / 2)
    # Copy so removals below don't mutate the caller's list.
    genPClassNamesCopy = list(genPClassNames)
    filtGenPClassNames = []
    for i in range(len(annPClassNames)):
        tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
        tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
        tarNumOccs = len(tar1.occNames)
        tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
        tarNumNotes = np.mean(tar2Notes)
        candidateNameList = []
        for gcn in genPClassNamesCopy:
            cand = pClasses[gcn]
            candNumOccs = len(cand.occNames)
            candNotes = [len(pOccs[on].score) for on in cand.occNames]
            candNumNotes = np.mean(candNotes)
            # Squared Euclidean distance in (numOccs, mean notes) space.
            candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -
                tarNumNotes) ** 2
            candidateNameList.append([candScore, gcn])
        # Keep the kNearest closest candidates and pick one uniformly.
        candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
        chop = candidateNameList[0:kNearest]
        choice = chop[np.random.choice(kNearest)][1]
        filtGenPClassNames.append(choice)
        # Sample without replacement.
        genPClassNamesCopy.remove(choice)
    return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
    """Split `inp` into exactly `num_chunks` contiguous chunks.

    Chunk sizes differ by at most one; the first ``len(inp) % num_chunks``
    chunks receive one extra element. Fixes the original implementation,
    which could return MORE than `num_chunks` chunks whenever the remainder
    exceeded the base chunk length (e.g. 19 items into 10 chunks produced
    19 one-element chunks and only redistributed/deleted one of the extras),
    and which raised on `num_chunks > len(inp)` (step size 0).
    """
    base, rem = divmod(len(inp), num_chunks)
    chunks = []
    start = 0
    for i in range(num_chunks):
        # The first `rem` chunks absorb the leftover elements.
        end = start + base + (1 if i < rem else 0)
        chunks.append(inp[start:end])
        start = end
    return chunks
def inspectFeature(featureName, table, tableNames, featsType='classFeatures'):
    """Collect one feature's value from each named entry of `table`.

    Looks up ``table[name][featsType][featureName]`` for every name in
    `tableNames` and returns the values in the same order.
    """
    return [table[name][featsType][featureName] for name in tableNames]
def scatterFeatures(fn1, fn2, table, tableNames):
    """Scatter-plot two class features against each other with matplotlib.

    Plots feature `fn1` (x-axis) vs. `fn2` (y-axis) for every entry named in
    `tableNames`, coloring annotated classes (item['type'] == 'ann') red and
    all others black. Displays the plot with plt.show(); returns None.
    """
    xs = []
    ys = []
    types = []  # per-point matplotlib color codes
    for tn in tableNames:
        item = table[tn]
        xs.append(item.classFeatures[fn1])
        ys.append(item.classFeatures[fn2])
        # 'r' (red) for annotated classes, 'k' (black) for generated ones.
        if item['type'] == 'ann':
            types.append('r')
        else:
            types.append('k')
    print(types)
    plt.scatter(xs, ys, c=types)
    plt.xlabel(fn1)
    plt.ylabel(fn2)
    plt.show()
    return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
ROUND_DURS_DIGITS = 5
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
    """
    given song name, occurrence start, occurrence end, and the database of score files,
    return the notes of the associated pattern occurrence
    useTies is a boolean determining whether or not tied notes count as
    two notes or one for the purpose of indexing (true for 1, false for 2)
    necessary bc MTC-ANN indexing doesn't count
    """
    # Inclusive note count of the annotated occurrence.
    numNotes = inEnd - inStart + 1
    # Flatten the score into a single stream of note objects.
    allNotes = songs[songName].score.flat.notes.stream()
    if useTies:
        # Each tie 'start' before the occurrence means the annotation index
        # lags music21's note count by one, so shift the start right.
        # NOTE(review): the slice stops at inStart - 1, not inStart — confirm
        # this off-by-one against the MTC-ANN indexing convention.
        beforeSlice = allNotes[:inStart - 1]
        numTies = 0
        for n in beforeSlice:
            if n.tie is not None:
                if n.tie.type == 'start':
                    numTies += 1
        inStart += numTies
        # Ties inside the occurrence add extra notes to the slice length.
        numTies = 0
        inSlice = allNotes[inStart:inStart + numNotes]
        for n in inSlice:
            if n.tie is not None:
                if n.tie.type == 'start':
                    numTies += 1
        numNotes += numTies
    # Final (possibly tie-adjusted) slice covering the occurrence.
    pattOcc = allNotes[inStart:inStart + numNotes]
    return pattOcc
def getFeaturesForSongs(score):
    """Compute song-level summary features from a music21 score.

    Returns a dict of feature name -> value, including the interval and
    duration relative-frequency tables ('interval_probs', 'duration_probs')
    that the occurrence features later use for expected-occurrence estimates.
    """
    mel = score.flat.notes.stream()
    midi = [note.pitch.midi for note in mel]
    ivals = [b - a for a, b in zip(midi, midi[1:])]
    durs = [round(float(note.quarterLength), ROUND_DURS_DIGITS) for note in mel]
    num_notes = len(midi)
    num_ivals = len(ivals)
    num_durs = len(durs)

    vec = {
        # Relative frequency of each signed interval over the whole song.
        'interval_probs': {k: v / num_ivals for k, v in Counter(ivals).items()},
        'pitch_mean': np.mean(midi),
        'interval_mean': np.mean(np.abs(ivals)),
        'interval_signs': sum(np.sign(ivals)) / num_ivals,
        # Proportion of stepwise (<= 2 semitones) and large (>= 7) intervals.
        'interval_prop_small': sum(abs(iv) <= 2 for iv in ivals) / num_ivals,
        'interval_prop_large': sum(abs(iv) >= 7 for iv in ivals) / num_ivals,
        # Relative frequency of each rounded note duration.
        'duration_probs': {k: v / num_durs for k, v in Counter(durs).items()},
        'rhythm_density': np.mean(durs),
        'rhythm_variability': np.std([np.log(float(d)) for d in durs]),
    }
    # Histograms: interval sizes 0..12, pitch classes 0..11, log2 duration bins.
    for n in range(13):
        vec['interval_count_' + str(n)] = sum(abs(iv) == n for iv in ivals) / num_ivals
    for n in range(12):
        vec['pitch_class_count_' + str(n)] = sum(abs(p) % 12 == n for p in midi) / num_notes
    for n in range(-3, 3):
        vec['rhythm_duration_count_' + str(n)] = (
            sum(2 ** n <= d < 2 ** (n + 1) for d in durs) / num_durs
        )
    return vec
def getFeaturesForOccurrences(cur_class, songs):
    """Compute the feature vector (dict) for a single pattern occurrence.

    Features cover pitch statistics, interval statistics, rhythm statistics,
    polynomial contour fits, fixed-length note/interval/duration sequences
    (zero-padded to max_length_occ), differences against the parent song's
    features, and log expected-occurrence estimates from the song's
    interval/duration probability tables.
    """
    # Sequence features are padded/truncated to this many notes.
    max_length_occ = 10
    vec = {}
    mel = cur_class.score
    noteNums = [x.pitch.midi for x in mel]
    # Signed semitone differences between consecutive notes.
    intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
        noteNums))]
    highest = max(noteNums)
    lowest = min(noteNums)
    # --- pitch features ---
    vec['numNotes'] = len(noteNums)
    vec['pitch_highest'] = highest
    vec['pitch_lowest'] = lowest
    vec['pitch_range'] = highest - lowest
    vec['pitch_num_classes'] = len(set(noteNums))
    vec['pitch_mean'] = np.mean(noteNums)
    vec['pitch_std'] = np.std(noteNums)
    # Relative position (0..1) of the first highest / lowest note.
    vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
    vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
    # Pitch-class histogram, normalized by note count.
    for n in range(12):
        num = len([x for x in noteNums if abs(x) % 12 == n])
        vec['pitch_class_count_' + str(n)] = num / len(noteNums)
    # --- interval features ---
    vec['interval_max'] = max(np.abs(intervals))
    vec['interval_min'] = min(np.abs(intervals))
    vec['interval_largest_asc'] = max([max(intervals), 0])
    vec['interval_largest_desc'] = min([min(intervals), 0])
    vec['interval_mean'] = np.mean(np.abs(intervals))
    # Proportion of stepwise (<= 2 semitones) and large (>= 7) intervals.
    vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
        range(0, len(intervals))]) / len(intervals)
    vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
        range(0, len(intervals))]) / len(intervals)
    # Sign of overall motion from first note to last note.
    vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(
        noteNums) - 1])
    vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
    # Interval-size histogram (0..12 semitones), normalized.
    for n in range(13):
        num = len([x for x in intervals if abs(x) == n])
        vec['interval_count_' + str(n)] = num / len(intervals)
    # 1 if strictly ascending, -1 if strictly descending, else 0.
    if all([(np.sign(x) == 1) for x in intervals]):
        vec['interval_strict_asc_or_desc'] = 1
    elif all([(np.sign(x) == -1) for x in intervals]):
        vec['interval_strict_asc_or_desc'] = -1
    else:
        vec['interval_strict_asc_or_desc'] = 0
    # --- rhythm features ---
    noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
    vec['rhythm_duration'] = sum(noteDurs)
    vec['rhythm_longest_note'] = max(noteDurs)
    vec['rhythm_shortest_note'] = min(noteDurs)
    vec['rhythm_density'] = np.mean(noteDurs)
    # Std of log durations, so halving/doubling a note counts equally.
    vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
    vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]
    # Duration histogram over log2 bins [2^n, 2^(n+1)).
    for n in range(-3, 3):
        num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
        vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
    # --- polynomial contour fits (pitch vs. normalized onset time) ---
    yCoords = [(y - noteNums[0]) for y in noteNums]
    xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]
    xCoords = [(x - xtemp[0]) for x in xtemp]
    polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
    # Leading coefficient plus sqrt of residual (residual array is empty
    # when the fit is exact, hence the size guard).
    vec['polyfit_1'] = polyFit1[0][0]
    vec['polyfit_residual_1'] = 0
    if polyFit1[1].size > 0:
        vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
    vec['polyfit_2'] = 0
    vec['polyfit_residual_2'] = 0
    vec['polyfit_3'] = 0
    vec['polyfit_residual_3'] = 0
    # Higher-order fits only when there are enough points for the degree.
    if len(noteNums) >= 3:
        polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
        vec['polyfit_2'] = polyFit2[0][0]
        if polyFit2[1].size > 0:
            vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
    if len(noteNums) >= 4:
        polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
        vec['polyfit_3'] = polyFit3[0][0]
        if polyFit3[1].size > 0:
            vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
    # --- fixed-length raw sequences, zero-padded to max_length_occ ---
    zeros = [(0) for i in range(max_length_occ)]
    for i in range(max_length_occ):
        vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
        vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
        vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
    # --- differences between song-level and occurrence-level features ---
    songVec = songs[cur_class.songName].songFeatures
    song_diff_keys = ['interval_mean', 'rhythm_variability',
        'rhythm_density', 'interval_signs', 'pitch_mean',
        'interval_prop_small', 'interval_prop_large']
    song_diff_keys += [x for x in vec.keys() if '_count' in x]
    for key in song_diff_keys:
        vec['diff_' + key] = songVec[key] - vec[key]
    # Log-probability of this interval sequence under the song's unigram
    # interval distribution (likewise for durations below).
    sumIntProbs = 1
    for i in intervals:
        sumIntProbs *= songVec['interval_probs'][i]
    vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
    sumDurProbs = 1
    for d in noteDurs:
        sumDurProbs *= songVec['duration_probs'][d]
    vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
    # --- metric-position features (defaulted, since .beat/.beatStrength
    # can raise when no time signature context is available) ---
    vec['rhythm_starts_on_downbeat'] = 0
    vec['rhythm_crosses_measure'] = 0
    vec['rhythm_start_beat_str'] = 0
    vec['rhythm_last_beat_str'] = 0
    try:
        noteBeats = [x.beat for x in mel]
        vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0
        # A beat number decreasing between consecutive notes implies a barline.
        vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n -
            1]) for n in range(1, len(noteBeats))]) > 0
        noteStr = [x.beatStrength for x in mel]
        vec['rhythm_start_beat_str'] = np.log(noteStr[0])
        vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])
    except m21.Music21ObjectException:
        pass
    return vec
def getFeaturesForClasses(patternClass, occs, songs):
    """
    Build the feature vector for one pattern class.

    For every per-occurrence feature, stores the mean ('avg_*') and the
    standard deviation ('std_*') across the class's occurrences, then adds
    class-level features measuring how much pitch/rhythm content the
    occurrences share.

    NOTE(review): the `songs` parameter is unused in this body — presumably
    kept for signature parity with the other feature extractors; confirm.
    """
    vec = {}
    vec['numOccs'] = len(patternClass.occNames)
    # every occurrence carries the same feature keys; take them from the first
    occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
    for fk in occFeatureKeys:
        allOccVals = [occs[occName].occFeatures[fk] for occName in
            patternClass.occNames]
        vec['avg_' + fk] = np.mean(allOccVals)
        vec['std_' + fk] = np.std(allOccVals)
    # raw pitch / duration sequences of every occurrence in the class
    scores = [occs[oc].score.flat for oc in patternClass.occNames]
    noteNums = [[x.pitch.midi for x in mel] for mel in scores]
    noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in
        mel] for mel in scores]
    flatNums = [x for subList in noteNums for x in subList]
    vec['num_notes_total'] = len(flatNums)
    # proportion of occurrences with a distinct pitch (resp. rhythm) sequence
    vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)
        ) / vec['numOccs']
    vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)
        ) / vec['numOccs']
    # pitch and rhythm sequences considered jointly
    pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[
        'numOccs'])]
    vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)
        ) / vec['numOccs']
    return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,
    pClasses, pOccs):
    """
    Sample len(annPClassNames) generated pattern classes so that the sample's
    occurrence counts and average occurrence lengths roughly match those of
    the annotated classes (a quick-and-dirty KNN in that 2-D space).

    For each annotated class, a random pair of annotated classes supplies the
    target statistics: number of occurrences from the first, mean note count
    from the second. One of the kNearest generated classes closest to that
    target is chosen at random and removed from the candidate pool (sampling
    without replacement).

    Returns the list of chosen generated class names. Depends on the global
    numpy RNG state (shuffle/choice), so results are reproducible only under
    a fixed np.random seed.
    """
    # random pairs of indices into annPClassNames: each annotated class index
    # appears exactly twice across all pairs
    indexPairs = np.arange(len(annPClassNames))
    indexPairs = np.concatenate([indexPairs, indexPairs])
    np.random.shuffle(indexPairs)
    # NOTE(review): len(indexPairs) / 2 is a float under Python 3; np.split
    # coerces a scalar section count via int(), but // would be cleaner.
    indexPairs = np.split(indexPairs, len(indexPairs) / 2)
    # shallow copy so choices can be removed without mutating the input list
    genPClassNamesCopy = list(genPClassNames)
    filtGenPClassNames = []
    for i in range(len(annPClassNames)):
        tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
        tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
        tarNumOccs = len(tar1.occNames)
        tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
        tarNumNotes = np.mean(tar2Notes)
        candidateNameList = []
        # squared distance of every remaining candidate to the target point
        # in the (numOccs, mean note count) plane
        for gcn in genPClassNamesCopy:
            cand = pClasses[gcn]
            candNumOccs = len(cand.occNames)
            candNotes = [len(pOccs[on].score) for on in cand.occNames]
            candNumNotes = np.mean(candNotes)
            candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -
                tarNumNotes) ** 2
            candidateNameList.append([candScore, gcn])
        # pick at random among the kNearest best matches and retire the pick
        candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
        chop = candidateNameList[0:kNearest]
        choice = chop[np.random.choice(kNearest)][1]
        filtGenPClassNames.append(choice)
        genPClassNamesCopy.remove(choice)
    return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
    """
    Split the sequence `inp` into exactly `num_chunks` contiguous chunks
    whose lengths differ by at most one, and return them as a list.

    Fixes two defects in the previous implementation:
    - when len(inp) was far from a multiple of num_chunks, more than one
      oversized remainder chunk could be produced but only the first was
      redistributed/deleted, so the result had more than num_chunks entries;
    - when len(inp) < num_chunks the slice step was 0, raising ValueError.
      Short inputs now simply yield empty trailing chunks.
    """
    base, extra = divmod(len(inp), num_chunks)
    chunks = []
    start = 0
    for i in range(num_chunks):
        # the first `extra` chunks absorb one leftover element each
        size = base + (1 if i < extra else 0)
        chunks.append(inp[start:start + size])
        start += size
    return chunks
def inspectFeature(featureName, table, tableNames, featsType='classFeatures'):
    """Return table[name][featsType][featureName] for each name, in order."""
    ret = []
    for tn in tableNames:
        item = table[tn]
        ret.append(item[featsType][featureName])
    return ret
def scatterFeatures(fn1, fn2, table, tableNames):
    """
    Scatter-plot class feature fn1 against fn2 for the named table entries.

    Entries whose 'type' is 'ann' (annotated) are drawn red, all others
    black. Blocks on plt.show(); returns None.
    """
    xs = []
    ys = []
    types = []
    for tn in tableNames:
        item = table[tn]
        # NOTE(review): attribute access for .classFeatures but item access
        # for ['type'] — entries apparently support both; confirm the type.
        xs.append(item.classFeatures[fn1])
        ys.append(item.classFeatures[fn2])
        if item['type'] == 'ann':
            types.append('r')
        else:
            types.append('k')
    print(types)
    plt.scatter(xs, ys, c=types)
    plt.xlabel(fn1)
    plt.ylabel(fn2)
    plt.show()
    return
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 17:42:18 2018
@author: Tim
"""
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
# round all duration values to this many digits!
# some are stored as fractions and that's just inconvenient
ROUND_DURS_DIGITS = 5
# N.B. THE HEADERS ARE:
# 0: tunefamily
# 1: songid
# 2: motifid
# 3: begintime
# 4: endtime
# 5: duration
# 6: startindex
# 7: endindex
# 8: numberofnotes
# 9: motifclass
# 10: description
# 11: annotator
# 12: changes
# try to fetch a single motif
# def extractMotif(annEntry, songs):
# """
# given a row from the annotation file and the database of score files,
# return the notes of theassociated motif and some of its metadata as a
# dictionary.
# """
#
# songName = annEntry[1]
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
#
# #add number of ties before start index from start index; meertens
# #DOESN'T count tied notes as notes but music21 DOES
# allNotes = songs[songName].score.flat.notes.stream()
# #subtract 1 here to get the first note of the occurence in the slice
# #so that we can get rid of it if it's a rest
# beforeSlice = allNotes[:inStart-1]
# numTies = 0
# for n in beforeSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
# inStart += numTies
#
# #do the same for ties inside of the snippet, but also keep track of where
# #they are and save that information with the motif so we don't have to go
# #through this procedure again
# numTies = 0
# inSlice = allNotes[inStart:(inStart+numNotes)]
# for n in inSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
#
# #this new numNotes will work with music21
# numNotes += numTies
#
# #NOW we know that we have the actual motif!
# motif = allNotes[inStart:(inStart+numNotes)]
#
# return {'notes':motif,
# 'startInd':inStart,
# 'endInd':(inStart+numNotes),
# 'songID':annEntry[1],
# 'motifClass':annEntry[9],
# 'duration':annEntry[5]}
# annotated first starting at 0, but tied notes are only counted for the onset
# must disregard tied notes when doing start/end indices tabarnak
# so: consider the list of notes up to the first index. if there's n ties
# that live behind the start index, increment the start index by n. when done,
# look 8 notes ahead and do the same thing
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
    """
    Return the music21 note stream for one pattern occurrence.

    Given a song name, the occurrence's start and end note indices
    (inclusive), and the database of score files, slice the occurrence's
    notes out of the song's flattened note stream.

    useTies: when True, the start index is shifted to account for tied notes
    that occur before the occurrence — MTC-ANN counts a tied note pair as a
    single note while music21 streams contain both, so annotation indices
    must be corrected. The note count is always expanded for ties *inside*
    the occurrence, regardless of useTies.
    """
    # inStart = int(annEntry[6])
    # numNotes = int(annEntry[8])
    numNotes = inEnd - inStart + 1  # including endpoints
    # add number of ties before start index from start index; meertens
    # DOESN'T count tied notes as notes but music21 DOES
    allNotes = songs[songName].score.flat.notes.stream()
    # subtract 1 here to get the first note of the occurrence in the slice
    # so that we can get rid of it if it's a rest
    # NOTE(review): with inStart == 0 this slice is allNotes[:-1] (almost the
    # whole stream) — confirm the -1 offset is intended.
    if(useTies):
        beforeSlice = allNotes[:inStart-1]
        numTies = 0
        # each 'start'-type tie before the occurrence is one extra music21
        # note that MTC-ANN did not count
        for n in beforeSlice:
            if(n.tie is not None):
                if(n.tie.type == 'start'):
                    numTies += 1
        inStart += numTies
    # do the same for ties inside of the snippet, but also keep track of where
    # they are and save that information with the pattOcc so we don't have to go
    # through this procedure again (TODO)
    numTies = 0
    inSlice = allNotes[inStart:(inStart+numNotes)]
    for n in inSlice:
        if(n.tie is not None):
            if(n.tie.type == 'start'):
                numTies += 1
    # this new numNotes will work with music21
    numNotes += numTies
    pattOcc = allNotes[inStart:(inStart+numNotes)]
    return pattOcc
def getFeaturesForSongs(score):
    """
    Compute a song-level feature vector from a music21 score.

    Returns a dict containing empirical interval/duration probability tables
    (used later to score how "expected" a pattern occurrence is within its
    song), summary pitch/interval/rhythm statistics, and normalized interval,
    pitch-class, and duration histograms.
    """
    notes = score.flat.notes.stream()
    pitches = [note.pitch.midi for note in notes]
    intervals = [b - a for a, b in zip(pitches, pitches[1:])]

    vec = {}

    # empirical probability of each melodic interval in this song
    intervalProbs = dict(Counter(intervals))
    for key in intervalProbs:
        intervalProbs[key] /= len(intervals)
    vec['interval_probs'] = intervalProbs

    vec['pitch_mean'] = np.mean(pitches)
    vec['interval_mean'] = np.mean(np.abs(intervals))
    # net melodic direction: +1 if all ascending, -1 if all descending
    vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
    vec['interval_prop_small'] = sum(abs(iv) <= 2 for iv in intervals) / len(intervals)
    vec['interval_prop_large'] = sum(abs(iv) >= 7 for iv in intervals) / len(intervals)

    # empirical probability of each (rounded) note duration
    durations = [round(float(note.quarterLength), ROUND_DURS_DIGITS) for note in notes]
    durationProbs = dict(Counter(durations))
    for key in durationProbs:
        durationProbs[key] /= len(durations)
    vec['duration_probs'] = durationProbs

    vec['rhythm_density'] = np.mean(durations)
    # std of log-durations, after Collins 2014
    vec['rhythm_variability'] = np.std([np.log(float(d)) for d in durations])

    # normalized histograms: interval sizes 0..12, pitch classes 0..11,
    # durations binned by powers of two (2**-3 .. 2**2 quarter lengths)
    for size in range(13):
        count = sum(1 for iv in intervals if abs(iv) == size)
        vec['interval_count_' + str(size)] = count / len(intervals)
    for pc in range(12):
        count = sum(1 for p in pitches if abs(p) % 12 == pc)
        vec['pitch_class_count_' + str(pc)] = count / len(pitches)
    for power in range(-3, 3):
        count = sum(1 for d in durations if 2 ** power <= d < 2 ** (power + 1))
        vec['rhythm_duration_count_' + str(power)] = count / len(durations)
    return vec
# single method that is passed an entry from the motifs dict
# and the database of songs and returns a dict that is a feature
# vector for that motif.
def getFeaturesForOccurrences(cur_class, songs):
    """
    Compute the feature vector for a single pattern occurrence.

    cur_class: occurrence record with a .score note stream and a .songName.
    songs: song database; songs[name].songFeatures must be the dict produced
    by getFeaturesForSongs (its probability tables feed the
    log-expected-occurrence features here).

    Returns a flat dict of scalar features: pitch statistics, interval
    statistics and histograms, rhythm statistics, polynomial contour fits,
    a fixed-length zero-padded sequence representation, and song-relative
    difference features.
    """
    # seq_* features below are padded/truncated to this many slots
    max_length_occ = 10
    vec = {}
    mel = cur_class.score
    # for now just remove rests
    noteNums = [x.pitch.midi for x in mel]
    intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
    highest = max(noteNums)
    lowest = min(noteNums)
    vec['numNotes'] = len(noteNums)
    vec['pitch_highest'] = highest
    vec['pitch_lowest'] = lowest
    vec['pitch_range'] = highest-lowest
    vec['pitch_num_classes'] = len(set(noteNums))
    vec['pitch_mean'] = np.mean(noteNums)
    vec['pitch_std'] = np.std(noteNums)
    # relative position (0..1) of the extreme pitches within the occurrence
    vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
    vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
    # pitch counting
    for n in range(12):
        num = len([x for x in noteNums if abs(x) % 12 == n])
        vec['pitch_class_count_' + str(n)] = num / len(noteNums)
    vec['interval_max'] = max(np.abs(intervals))
    vec['interval_min'] = min(np.abs(intervals))
    vec['interval_largest_asc'] = max([max(intervals), 0])
    vec['interval_largest_desc'] = min([min(intervals), 0])
    vec['interval_mean'] = np.mean(np.abs(intervals))
    # proportions of stepwise (<= 2 semitones) and leap (>= 7) intervals
    vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
    vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
    vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])
    vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
    # interval counting
    for n in range(13):
        num = len([x for x in intervals if abs(x) == n])
        vec['interval_count_' + str(n)] = num / len(intervals)
    # -1 if monotonically down, 1 if up, else 0
    if all([np.sign(x) == 1 for x in intervals]):
        vec['interval_strict_asc_or_desc'] = 1
    elif all([np.sign(x) == -1 for x in intervals]):
        vec['interval_strict_asc_or_desc'] = -1
    else:
        vec['interval_strict_asc_or_desc'] = 0
    # rhythmic properties
    noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
    vec['rhythm_duration'] = sum(noteDurs)
    vec['rhythm_longest_note'] = max(noteDurs)
    vec['rhythm_shortest_note'] = min(noteDurs)
    vec['rhythm_density'] = np.mean(noteDurs)
    vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])  # from Collins 2014
    vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]
    # rhythm counting
    for n in range(-3, 3):
        num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
        vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
    # POLYFIT IDEA: fit low-degree polynomials to the melodic contour
    # (onset time normalized by total duration vs. pitch relative to the
    # first note); store the leading coefficient and the fit residual
    yCoords = [y - noteNums[0] for y in noteNums]
    xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]
    xCoords = [x - xtemp[0] for x in xtemp]
    # print(str(xCoords) + " vs " + str(yCoords))
    polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
    vec['polyfit_1'] = polyFit1[0][0]
    # residuals array is empty for an exact fit; default to 0 in that case
    vec['polyfit_residual_1'] = 0
    if polyFit1[1].size > 0:
        vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
    vec['polyfit_2'] = 0
    vec['polyfit_residual_2'] = 0
    vec['polyfit_3'] = 0
    vec['polyfit_residual_3'] = 0
    # higher-degree fits only when there are enough points
    if len(noteNums) >= 3:
        polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
        vec['polyfit_2'] = polyFit2[0][0]
        if polyFit2[1].size > 0:
            vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
    if len(noteNums) >= 4:
        polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
        vec['polyfit_3'] = polyFit3[0][0]
        if polyFit3[1].size > 0:
            vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
    # add sequence representation of occurrence, zero-padded to
    # max_length_occ; NOTE(review): longer occurrences are silently truncated
    zeros = [0 for i in range(max_length_occ)]
    for i in range(max_length_occ):
        vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
        vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
        vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
    # differences between song and this motif
    songVec = songs[cur_class.songName].songFeatures
    song_diff_keys = [
        'interval_mean',
        'rhythm_variability',
        'rhythm_density',
        'interval_signs',
        'pitch_mean',
        'interval_prop_small',
        'interval_prop_large'
    ]
    song_diff_keys += [x for x in vec.keys() if '_count' in x]
    for key in song_diff_keys:
        vec['diff_' + key] = songVec[key] - vec[key]
    # songScore = songs[motif['songName']]['score'].flat.notes.stream()
    # songScoreNums = [x.pitch.midi for x in songScore]
    # vec['intervalFollowing'] = 0
    # if motif['endInd'] + 1 < len(songScoreNums):
    #     vec['intervalFollowing'] = songScoreNums[motif['endInd'] + 1] - noteNums[-1]
    # vec['intervalPreceding'] = 0
    # if motif['endInd'] - 1 > 0:
    #     vec['intervalPreceding'] = songScoreNums[motif['endInd'] - 1] - noteNums[0]
    # log joint probability of the occurrence's intervals / durations under
    # the song's empirical distributions ("how expected is this content")
    sumIntProbs = 1
    for i in intervals:
        sumIntProbs *= songVec['interval_probs'][i]
    vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
    sumDurProbs = 1
    for d in noteDurs:
        sumDurProbs *= songVec['duration_probs'][d]
    vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
    # metric-position features default to 0 when beat info is unavailable
    vec['rhythm_starts_on_downbeat'] = 0
    vec['rhythm_crosses_measure'] = 0
    vec['rhythm_start_beat_str'] = 0
    vec['rhythm_last_beat_str'] = 0
    try:
        noteBeats = [x.beat for x in mel]
        vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)
        # a decrease in beat number between consecutive notes implies a barline
        vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0
        # figure out how to tell if note has associated time signature
        noteStr = [x.beatStrength for x in mel]
        vec['rhythm_start_beat_str'] = np.log(noteStr[0])
        vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])
    except m21.Music21ObjectException:
        # this is not a good solution.
        pass
    # send it back
    return vec
def getFeaturesForClasses(patternClass, occs, songs):
    """
    Build the feature vector for one pattern class.

    Aggregates the per-occurrence features of every occurrence in the class
    (mean 'avg_*' and standard deviation 'std_*' of each feature), then adds
    class-level features describing how much pitch/rhythm content is shared
    between occurrences.

    NOTE(review): `songs` is unused in this body — presumably kept for
    signature parity with the other feature extractors; confirm.
    """
    numOccs = len(patternClass.occNames)
    vec = {'numOccs': numOccs}

    # mean/std of every occurrence-level feature across the class; all
    # occurrences carry the same keys, so read them from the first one
    firstOcc = occs[patternClass.occNames[0]]
    for featKey in firstOcc.occFeatures.keys():
        vals = [occs[name].occFeatures[featKey] for name in patternClass.occNames]
        vec['avg_' + featKey] = np.mean(vals)
        vec['std_' + featKey] = np.std(vals)

    # raw pitch / duration content of each occurrence
    melodies = [occs[name].score.flat for name in patternClass.occNames]
    pitchSeqs = [[note.pitch.midi for note in mel] for mel in melodies]
    durSeqs = [[round(float(note.quarterLength), ROUND_DURS_DIGITS) for note in mel]
               for mel in melodies]

    vec['num_notes_total'] = sum(len(seq) for seq in pitchSeqs)
    # proportion of occurrences with distinct pitch / rhythm / joint content
    vec['unique_pitch_prop_content'] = len({tuple(s) for s in pitchSeqs}) / numOccs
    vec['unique_rhythm_prop_content'] = len({tuple(s) for s in durSeqs}) / numOccs
    combined = [pitchSeqs[i] + durSeqs[i] for i in range(numOccs)]
    vec['prop_unique_content'] = len({tuple(s) for s in combined}) / numOccs
    return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):
    """
    Sample len(annPClassNames) generated pattern classes so the sample's
    occurrence counts and average occurrence lengths roughly match those of
    the annotated classes.

    For each annotated class, a random pair of annotated classes supplies the
    target statistics (numOccs from the first, mean note count from the
    second); one of the kNearest closest generated classes is chosen at
    random and removed from the candidate pool (sampling without
    replacement). Depends on the global numpy RNG state.
    """
    # so: we want to take a sample of our huge number of generated pattern classes
    # such that the number of occurrences and average cardinality doesn't easily
    # distinguish our sample from the annotated group.
    # perform a quick and dirty knn to get a bunch of generated class names
    # whose cardinalities and numOccs somewhat match the annotated data.
    indexPairs = np.arange(len(annPClassNames))
    indexPairs = np.concatenate([indexPairs, indexPairs])
    np.random.shuffle(indexPairs)
    # NOTE(review): len(indexPairs)/2 is a float under Python 3; np.split
    # coerces a scalar section count via int(), but // would be cleaner.
    indexPairs = np.split(indexPairs, len(indexPairs)/2)
    # shallow copy of the name list so picks can be removed without mutating
    # the caller's input
    genPClassNamesCopy = list(genPClassNames)
    filtGenPClassNames = []
    for i in range(len(annPClassNames)):
        tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
        tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
        tarNumOccs = len(tar1.occNames)
        tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
        tarNumNotes = np.mean(tar2Notes)
        candidateNameList = []
        # calculate how close each generated class is to these parameters
        for gcn in genPClassNamesCopy:
            cand = pClasses[gcn]
            candNumOccs = len(cand.occNames)
            candNotes = [len(pOccs[on].score) for on in cand.occNames]
            candNumNotes = np.mean(candNotes)
            candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2
            candidateNameList.append([candScore, gcn])
        # from the kNearest closest generated classes, choose one and remove
        # that one from the copy array
        candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
        chop = candidateNameList[0:kNearest]
        choice = chop[np.random.choice(kNearest)][1]
        filtGenPClassNames.append(choice)
        genPClassNamesCopy.remove(choice)
    return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
    """
    Split the sequence `inp` into exactly `num_chunks` contiguous chunks
    whose lengths differ by at most one, and return them as a list.

    Fixes two defects in the previous implementation:
    - when len(inp) was far from a multiple of num_chunks, more than one
      oversized remainder chunk could be produced but only the first was
      redistributed/deleted, so the result had more than num_chunks entries;
    - when len(inp) < num_chunks the slice step was 0, raising ValueError.
      Short inputs now simply yield empty trailing chunks.
    """
    base, extra = divmod(len(inp), num_chunks)
    chunks = []
    start = 0
    for i in range(num_chunks):
        # the first `extra` chunks absorb one leftover element each
        size = base + (1 if i < extra else 0)
        chunks.append(inp[start:start + size])
        start += size
    return chunks
# just for testing: get all features
# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))
def inspectFeature(featureName, table, tableNames, featsType="classFeatures"):
    """Collect the value of one feature across several table entries.

    Looks up ``table[name][featsType][featureName]`` for every name in
    ``tableNames`` and returns the values as a list, in the same order.
    """
    return [table[name][featsType][featureName] for name in tableNames]
def scatterFeatures(fn1, fn2, table, tableNames):
    """Scatter-plot class feature fn1 against fn2 for the named entries.

    Entries whose 'type' is 'ann' (annotated) are drawn red, all others
    black. Blocks on plt.show(); returns None.
    """
    xs, ys, colors = [], [], []
    for name in tableNames:
        entry = table[name]
        xs.append(entry.classFeatures[fn1])
        ys.append(entry.classFeatures[fn2])
        colors.append('r' if entry['type'] == 'ann' else 'k')
    print(colors)
    plt.scatter(xs, ys, c=colors)
    plt.xlabel(fn1)
    plt.ylabel(fn2)
    plt.show()
    return
|
flexible
|
{
"blob_id": "eb9135c6bcf89a62534cfc8480e5d44a089fe5a8",
"index": 1216,
"step-1": "<mask token>\n\n\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n numNotes = inEnd - inStart + 1\n allNotes = songs[songName].score.flat.notes.stream()\n if useTies:\n beforeSlice = allNotes[:inStart - 1]\n numTies = 0\n for n in beforeSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n inStart += numTies\n numTies = 0\n inSlice = allNotes[inStart:inStart + numNotes]\n for n in inSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n numNotes += numTies\n pattOcc = allNotes[inStart:inStart + numNotes]\n return pattOcc\n\n\n<mask token>\n\n\ndef getFeaturesForOccurrences(cur_class, songs):\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n highest = max(noteNums)\n lowest = min(noteNums)\n vec['numNotes'] = len(noteNums)\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest - lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = 
min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(\n noteNums) - 1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n if all([(np.sign(x) == 1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([(np.sign(x) == -1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n yCoords = [(y - noteNums[0]) for y in noteNums]\n xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]\n xCoords = [(x - xtemp[0]) for x in xtemp]\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n 
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n zeros = [(0) for i in range(max_length_occ)]\n for i in range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n songVec = songs[cur_class.songName].songFeatures\n song_diff_keys = ['interval_mean', 'rhythm_variability',\n 'rhythm_density', 'interval_signs', 'pitch_mean',\n 'interval_prop_small', 'interval_prop_large']\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0\n vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n - \n 1]) for n in range(1, len(noteBeats))]) > 0\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])\n except m21.Music21ObjectException:\n pass\n return vec\n\n\n<mask token>\n\n\ndef inspectFeature(featureName, table, tableNames, featsType='classFeatures'):\n ret = []\n for tn in tableNames:\n item = table[tn]\n ret.append(item[featsType][featureName])\n return ret\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n numNotes = inEnd - inStart + 1\n allNotes = songs[songName].score.flat.notes.stream()\n if useTies:\n beforeSlice = allNotes[:inStart - 1]\n numTies = 0\n for n in beforeSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n inStart += numTies\n numTies = 0\n inSlice = allNotes[inStart:inStart + numNotes]\n for n in inSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n numNotes += numTies\n pattOcc = allNotes[inStart:inStart + numNotes]\n return pattOcc\n\n\ndef getFeaturesForSongs(score):\n vec = {}\n mel = score.flat.notes.stream()\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n couInt = dict(Counter(intervals))\n for k in couInt.keys():\n couInt[k] /= len(intervals)\n vec['interval_probs'] = couInt\n vec['pitch_mean'] = np.mean(noteNums)\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n couRtm = dict(Counter(noteDurs))\n for k in couRtm.keys():\n couRtm[k] /= len(noteDurs)\n vec['duration_probs'] = couRtm\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n 
for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n return vec\n\n\ndef getFeaturesForOccurrences(cur_class, songs):\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n highest = max(noteNums)\n lowest = min(noteNums)\n vec['numNotes'] = len(noteNums)\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest - lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(\n noteNums) - 1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + 
str(n)] = num / len(intervals)\n if all([(np.sign(x) == 1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([(np.sign(x) == -1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n yCoords = [(y - noteNums[0]) for y in noteNums]\n xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]\n xCoords = [(x - xtemp[0]) for x in xtemp]\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n zeros = [(0) for i in range(max_length_occ)]\n for i in range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n songVec = songs[cur_class.songName].songFeatures\n song_diff_keys = 
['interval_mean', 'rhythm_variability',\n 'rhythm_density', 'interval_signs', 'pitch_mean',\n 'interval_prop_small', 'interval_prop_large']\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0\n vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n - \n 1]) for n in range(1, len(noteBeats))]) > 0\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])\n except m21.Music21ObjectException:\n pass\n return vec\n\n\ndef getFeaturesForClasses(patternClass, occs, songs):\n vec = {}\n vec['numOccs'] = len(patternClass.occNames)\n occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()\n for fk in occFeatureKeys:\n allOccVals = [occs[occName].occFeatures[fk] for occName in\n patternClass.occNames]\n vec['avg_' + fk] = np.mean(allOccVals)\n vec['std_' + fk] = np.std(allOccVals)\n scores = [occs[oc].score.flat for oc in patternClass.occNames]\n noteNums = [[x.pitch.midi for x in mel] for mel in scores]\n noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in\n mel] for mel in scores]\n flatNums = [x for subList in noteNums for x in subList]\n vec['num_notes_total'] = len(flatNums)\n vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)\n ) / vec['numOccs']\n vec['unique_rhythm_prop_content'] = 
len(set(tuple(x) for x in noteDurs)\n ) / vec['numOccs']\n pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[\n 'numOccs'])]\n vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)\n ) / vec['numOccs']\n return vec\n\n\ndef filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,\n pClasses, pOccs):\n indexPairs = np.arange(len(annPClassNames))\n indexPairs = np.concatenate([indexPairs, indexPairs])\n np.random.shuffle(indexPairs)\n indexPairs = np.split(indexPairs, len(indexPairs) / 2)\n genPClassNamesCopy = list(genPClassNames)\n filtGenPClassNames = []\n for i in range(len(annPClassNames)):\n tar1 = pClasses[annPClassNames[indexPairs[i][0]]]\n tar2 = pClasses[annPClassNames[indexPairs[i][1]]]\n tarNumOccs = len(tar1.occNames)\n tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]\n tarNumNotes = np.mean(tar2Notes)\n candidateNameList = []\n for gcn in genPClassNamesCopy:\n cand = pClasses[gcn]\n candNumOccs = len(cand.occNames)\n candNotes = [len(pOccs[on].score) for on in cand.occNames]\n candNumNotes = np.mean(candNotes)\n candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -\n tarNumNotes) ** 2\n candidateNameList.append([candScore, gcn])\n candidateNameList = sorted(candidateNameList, key=lambda x: x[0])\n chop = candidateNameList[0:kNearest]\n choice = chop[np.random.choice(kNearest)][1]\n filtGenPClassNames.append(choice)\n genPClassNamesCopy.remove(choice)\n return filtGenPClassNames\n\n\ndef split_into_chunks(inp, num_chunks):\n chunk_len = int(np.floor(len(inp) / num_chunks))\n chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]\n if len(chunks) > num_chunks:\n for i, x in enumerate(chunks[num_chunks]):\n chunks[i].append(x)\n del chunks[num_chunks]\n return chunks\n\n\ndef inspectFeature(featureName, table, tableNames, featsType='classFeatures'):\n ret = []\n for tn in tableNames:\n item = table[tn]\n ret.append(item[featsType][featureName])\n return ret\n\n\ndef 
scatterFeatures(fn1, fn2, table, tableNames):\n xs = []\n ys = []\n types = []\n for tn in tableNames:\n item = table[tn]\n xs.append(item.classFeatures[fn1])\n ys.append(item.classFeatures[fn2])\n if item['type'] == 'ann':\n types.append('r')\n else:\n types.append('k')\n print(types)\n plt.scatter(xs, ys, c=types)\n plt.xlabel(fn1)\n plt.ylabel(fn2)\n plt.show()\n return\n",
"step-3": "<mask token>\nROUND_DURS_DIGITS = 5\n\n\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n numNotes = inEnd - inStart + 1\n allNotes = songs[songName].score.flat.notes.stream()\n if useTies:\n beforeSlice = allNotes[:inStart - 1]\n numTies = 0\n for n in beforeSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n inStart += numTies\n numTies = 0\n inSlice = allNotes[inStart:inStart + numNotes]\n for n in inSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n numNotes += numTies\n pattOcc = allNotes[inStart:inStart + numNotes]\n return pattOcc\n\n\ndef getFeaturesForSongs(score):\n vec = {}\n mel = score.flat.notes.stream()\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n couInt = dict(Counter(intervals))\n for k in couInt.keys():\n couInt[k] /= len(intervals)\n vec['interval_probs'] = couInt\n vec['pitch_mean'] = np.mean(noteNums)\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n couRtm = dict(Counter(noteDurs))\n for k in couRtm.keys():\n couRtm[k] /= len(noteDurs)\n vec['duration_probs'] = couRtm\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) 
for n in noteDurs])\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n return vec\n\n\ndef getFeaturesForOccurrences(cur_class, songs):\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n highest = max(noteNums)\n lowest = min(noteNums)\n vec['numNotes'] = len(noteNums)\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest - lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(\n noteNums) - 1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n 
vec['interval_count_' + str(n)] = num / len(intervals)\n if all([(np.sign(x) == 1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([(np.sign(x) == -1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n yCoords = [(y - noteNums[0]) for y in noteNums]\n xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]\n xCoords = [(x - xtemp[0]) for x in xtemp]\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n zeros = [(0) for i in range(max_length_occ)]\n for i in range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n songVec = 
songs[cur_class.songName].songFeatures\n song_diff_keys = ['interval_mean', 'rhythm_variability',\n 'rhythm_density', 'interval_signs', 'pitch_mean',\n 'interval_prop_small', 'interval_prop_large']\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0\n vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n - \n 1]) for n in range(1, len(noteBeats))]) > 0\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])\n except m21.Music21ObjectException:\n pass\n return vec\n\n\ndef getFeaturesForClasses(patternClass, occs, songs):\n vec = {}\n vec['numOccs'] = len(patternClass.occNames)\n occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()\n for fk in occFeatureKeys:\n allOccVals = [occs[occName].occFeatures[fk] for occName in\n patternClass.occNames]\n vec['avg_' + fk] = np.mean(allOccVals)\n vec['std_' + fk] = np.std(allOccVals)\n scores = [occs[oc].score.flat for oc in patternClass.occNames]\n noteNums = [[x.pitch.midi for x in mel] for mel in scores]\n noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in\n mel] for mel in scores]\n flatNums = [x for subList in noteNums for x in subList]\n vec['num_notes_total'] = len(flatNums)\n vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)\n ) / 
vec['numOccs']\n vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)\n ) / vec['numOccs']\n pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[\n 'numOccs'])]\n vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)\n ) / vec['numOccs']\n return vec\n\n\ndef filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,\n pClasses, pOccs):\n indexPairs = np.arange(len(annPClassNames))\n indexPairs = np.concatenate([indexPairs, indexPairs])\n np.random.shuffle(indexPairs)\n indexPairs = np.split(indexPairs, len(indexPairs) / 2)\n genPClassNamesCopy = list(genPClassNames)\n filtGenPClassNames = []\n for i in range(len(annPClassNames)):\n tar1 = pClasses[annPClassNames[indexPairs[i][0]]]\n tar2 = pClasses[annPClassNames[indexPairs[i][1]]]\n tarNumOccs = len(tar1.occNames)\n tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]\n tarNumNotes = np.mean(tar2Notes)\n candidateNameList = []\n for gcn in genPClassNamesCopy:\n cand = pClasses[gcn]\n candNumOccs = len(cand.occNames)\n candNotes = [len(pOccs[on].score) for on in cand.occNames]\n candNumNotes = np.mean(candNotes)\n candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -\n tarNumNotes) ** 2\n candidateNameList.append([candScore, gcn])\n candidateNameList = sorted(candidateNameList, key=lambda x: x[0])\n chop = candidateNameList[0:kNearest]\n choice = chop[np.random.choice(kNearest)][1]\n filtGenPClassNames.append(choice)\n genPClassNamesCopy.remove(choice)\n return filtGenPClassNames\n\n\ndef split_into_chunks(inp, num_chunks):\n chunk_len = int(np.floor(len(inp) / num_chunks))\n chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]\n if len(chunks) > num_chunks:\n for i, x in enumerate(chunks[num_chunks]):\n chunks[i].append(x)\n del chunks[num_chunks]\n return chunks\n\n\ndef inspectFeature(featureName, table, tableNames, featsType='classFeatures'):\n ret = []\n for tn in tableNames:\n item = table[tn]\n 
ret.append(item[featsType][featureName])\n return ret\n\n\ndef scatterFeatures(fn1, fn2, table, tableNames):\n xs = []\n ys = []\n types = []\n for tn in tableNames:\n item = table[tn]\n xs.append(item.classFeatures[fn1])\n ys.append(item.classFeatures[fn2])\n if item['type'] == 'ann':\n types.append('r')\n else:\n types.append('k')\n print(types)\n plt.scatter(xs, ys, c=types)\n plt.xlabel(fn1)\n plt.ylabel(fn2)\n plt.show()\n return\n",
"step-4": "<mask token>\nimport music21 as m21\nimport music21.features.jSymbolic as jsym\nimport scipy.stats\nfrom collections import Counter\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\nROUND_DURS_DIGITS = 5\n\n\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n numNotes = inEnd - inStart + 1\n allNotes = songs[songName].score.flat.notes.stream()\n if useTies:\n beforeSlice = allNotes[:inStart - 1]\n numTies = 0\n for n in beforeSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n inStart += numTies\n numTies = 0\n inSlice = allNotes[inStart:inStart + numNotes]\n for n in inSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n numNotes += numTies\n pattOcc = allNotes[inStart:inStart + numNotes]\n return pattOcc\n\n\ndef getFeaturesForSongs(score):\n vec = {}\n mel = score.flat.notes.stream()\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n couInt = dict(Counter(intervals))\n for k in couInt.keys():\n couInt[k] /= len(intervals)\n vec['interval_probs'] = couInt\n vec['pitch_mean'] = np.mean(noteNums)\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n 
couRtm = dict(Counter(noteDurs))\n for k in couRtm.keys():\n couRtm[k] /= len(noteDurs)\n vec['duration_probs'] = couRtm\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n return vec\n\n\ndef getFeaturesForOccurrences(cur_class, songs):\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n highest = max(noteNums)\n lowest = min(noteNums)\n vec['numNotes'] = len(noteNums)\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest - lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n 
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(\n noteNums) - 1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n if all([(np.sign(x) == 1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([(np.sign(x) == -1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n yCoords = [(y - noteNums[0]) for y in noteNums]\n xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]\n xCoords = [(x - xtemp[0]) for x in xtemp]\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n zeros = [(0) for i in range(max_length_occ)]\n for i in 
range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n songVec = songs[cur_class.songName].songFeatures\n song_diff_keys = ['interval_mean', 'rhythm_variability',\n 'rhythm_density', 'interval_signs', 'pitch_mean',\n 'interval_prop_small', 'interval_prop_large']\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0\n vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n - \n 1]) for n in range(1, len(noteBeats))]) > 0\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])\n except m21.Music21ObjectException:\n pass\n return vec\n\n\ndef getFeaturesForClasses(patternClass, occs, songs):\n vec = {}\n vec['numOccs'] = len(patternClass.occNames)\n occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()\n for fk in occFeatureKeys:\n allOccVals = [occs[occName].occFeatures[fk] for occName in\n patternClass.occNames]\n vec['avg_' + fk] = np.mean(allOccVals)\n vec['std_' + fk] = np.std(allOccVals)\n scores = [occs[oc].score.flat for oc in patternClass.occNames]\n noteNums = [[x.pitch.midi for x in mel] for mel in scores]\n noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in\n mel] for 
mel in scores]\n flatNums = [x for subList in noteNums for x in subList]\n vec['num_notes_total'] = len(flatNums)\n vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)\n ) / vec['numOccs']\n vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)\n ) / vec['numOccs']\n pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[\n 'numOccs'])]\n vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)\n ) / vec['numOccs']\n return vec\n\n\ndef filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,\n pClasses, pOccs):\n indexPairs = np.arange(len(annPClassNames))\n indexPairs = np.concatenate([indexPairs, indexPairs])\n np.random.shuffle(indexPairs)\n indexPairs = np.split(indexPairs, len(indexPairs) / 2)\n genPClassNamesCopy = list(genPClassNames)\n filtGenPClassNames = []\n for i in range(len(annPClassNames)):\n tar1 = pClasses[annPClassNames[indexPairs[i][0]]]\n tar2 = pClasses[annPClassNames[indexPairs[i][1]]]\n tarNumOccs = len(tar1.occNames)\n tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]\n tarNumNotes = np.mean(tar2Notes)\n candidateNameList = []\n for gcn in genPClassNamesCopy:\n cand = pClasses[gcn]\n candNumOccs = len(cand.occNames)\n candNotes = [len(pOccs[on].score) for on in cand.occNames]\n candNumNotes = np.mean(candNotes)\n candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -\n tarNumNotes) ** 2\n candidateNameList.append([candScore, gcn])\n candidateNameList = sorted(candidateNameList, key=lambda x: x[0])\n chop = candidateNameList[0:kNearest]\n choice = chop[np.random.choice(kNearest)][1]\n filtGenPClassNames.append(choice)\n genPClassNamesCopy.remove(choice)\n return filtGenPClassNames\n\n\ndef split_into_chunks(inp, num_chunks):\n chunk_len = int(np.floor(len(inp) / num_chunks))\n chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]\n if len(chunks) > num_chunks:\n for i, x in enumerate(chunks[num_chunks]):\n chunks[i].append(x)\n del 
chunks[num_chunks]\n return chunks\n\n\ndef inspectFeature(featureName, table, tableNames, featsType='classFeatures'):\n ret = []\n for tn in tableNames:\n item = table[tn]\n ret.append(item[featsType][featureName])\n return ret\n\n\ndef scatterFeatures(fn1, fn2, table, tableNames):\n xs = []\n ys = []\n types = []\n for tn in tableNames:\n item = table[tn]\n xs.append(item.classFeatures[fn1])\n ys.append(item.classFeatures[fn2])\n if item['type'] == 'ann':\n types.append('r')\n else:\n types.append('k')\n print(types)\n plt.scatter(xs, ys, c=types)\n plt.xlabel(fn1)\n plt.ylabel(fn2)\n plt.show()\n return\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 7 17:42:18 2018\n\n@author: Tim\n\"\"\"\nimport music21 as m21\nimport music21.features.jSymbolic as jsym\nimport scipy.stats\nfrom collections import Counter\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\n\n# round all duration values to this many digits!\n# some are stored as fractions and that's just inconvenient\nROUND_DURS_DIGITS = 5\n\n# N.B. THE HEADERS ARE:\n# 0: tunefamily\n# 1: songid\n# 2: motifid\n# 3: begintime\n# 4: endtime\n# 5: duration\n# 6: startindex\n# 7: endindex\n# 8: numberofnotes\n# 9: motifclass\n# 10: description\n# 11: annotator\n# 12: changes\n\n# try to fetch a single motif\n\n\n# def extractMotif(annEntry, songs):\n# \"\"\"\n# given a row from the annotation file and the database of score files,\n# return the notes of theassociated motif and some of its metadata as a\n# dictionary.\n# \"\"\"\n#\n# songName = annEntry[1]\n# inStart = int(annEntry[6])\n# numNotes = int(annEntry[8])\n#\n# #add number of ties before start index from start index; meertens\n# #DOESN'T count tied notes as notes but music21 DOES\n# allNotes = songs[songName].score.flat.notes.stream()\n# #subtract 1 here to get the first note of the occurence in the slice\n# #so that we can get rid of it if it's a rest\n# beforeSlice = allNotes[:inStart-1]\n# numTies = 0\n# for n in beforeSlice:\n# if(n.tie != None):\n# if(n.tie.type == 'start'):\n# numTies += 1\n#\n# inStart += numTies\n#\n# #do the same for ties inside of the snippet, but also keep track of where\n# #they are and save that information with the motif so we don't have to go\n# #through this procedure again\n# numTies = 0\n# inSlice = allNotes[inStart:(inStart+numNotes)]\n# for n in inSlice:\n# if(n.tie != None):\n# if(n.tie.type == 'start'):\n# numTies += 1\n#\n#\n# #this new numNotes will work with music21\n# numNotes += numTies\n#\n# #NOW we know that we have the actual motif!\n# motif = 
allNotes[inStart:(inStart+numNotes)]\n#\n# return {'notes':motif,\n# 'startInd':inStart,\n# 'endInd':(inStart+numNotes),\n# 'songID':annEntry[1],\n# 'motifClass':annEntry[9],\n# 'duration':annEntry[5]}\n\n# annotated first starting at 0, but tied notes are only counted for the onset\n# must disregard tied notes when doing start/end indices tabarnak\n\n# so: consider the list of notes up to the first index. if there's n ties\n# that live behind the start index, increment the start index by n. when done,\n# look 8 notes ahead and do the same thing\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n\n # inStart = int(annEntry[6])\n # numNotes = int(annEntry[8])\n numNotes = inEnd - inStart + 1 # including endpoints\n\n # add number of ties before start index from start index; meertens\n # DOESN'T count tied notes as notes but music21 DOES\n allNotes = songs[songName].score.flat.notes.stream()\n # subtract 1 here to get the first note of the occurence in the slice\n # so that we can get rid of it if it's a rest\n if(useTies):\n beforeSlice = allNotes[:inStart-1]\n numTies = 0\n for n in beforeSlice:\n if(n.tie is not None):\n if(n.tie.type == 'start'):\n numTies += 1\n\n inStart += numTies\n\n # do the same for ties inside of the snippet, but also keep track of where\n # they are and save that information with the pattOcc so we don't have to go\n # through this procedure again (TODO)\n numTies = 0\n inSlice = allNotes[inStart:(inStart+numNotes)]\n for n in inSlice:\n if(n.tie is not None):\n if(n.tie.type == 'start'):\n numTies += 1\n\n # this new numNotes will work with music21\n numNotes += numTies\n\n 
pattOcc = allNotes[inStart:(inStart+numNotes)]\n\n return pattOcc\n\n\ndef getFeaturesForSongs(score):\n vec = {}\n\n mel = score.flat.notes.stream()\n noteNums = [x.pitch.midi for x in mel]\n intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]\n couInt = dict(Counter(intervals))\n for k in couInt.keys():\n couInt[k] /= len(intervals)\n\n vec['interval_probs'] = couInt\n vec['pitch_mean'] = np.mean(noteNums)\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)\n\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n\n couRtm = dict(Counter(noteDurs))\n for k in couRtm.keys():\n couRtm[k] /= len(noteDurs)\n\n vec['duration_probs'] = couRtm\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014\n\n # HISTOGRAMS:\n # interval counting\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n\n return vec\n\n\n# single method that is passed an entry from the motifs dict\n# and the database of songs and returns a dict that is a feature\n# vector for that motif.\ndef getFeaturesForOccurrences(cur_class, songs):\n\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n\n # for now just remove rests\n\n noteNums = [x.pitch.midi for x in mel]\n intervals = [noteNums[n] - noteNums[n-1] for n in range(1, 
len(noteNums))]\n\n highest = max(noteNums)\n lowest = min(noteNums)\n\n vec['numNotes'] = len(noteNums)\n\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest-lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n\n # pitch counting\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)\n vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n\n # interval counting\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n\n # -1 if monotonically down, 1 if up, else 0\n if all([np.sign(x) == 1 for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([np.sign(x) == -1 for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n\n # rhythmic properties\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n 
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]\n\n # rhythm counting\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n\n # POLYFIT IDEA\n yCoords = [y - noteNums[0] for y in noteNums]\n xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]\n xCoords = [x - xtemp[0] for x in xtemp]\n\n # print(str(xCoords) + \" vs \" + str(yCoords))\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n\n # add sequence representation of occurrence\n zeros = [0 for i in range(max_length_occ)]\n for i in range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n\n # differences between song and this motif\n songVec = songs[cur_class.songName].songFeatures\n\n song_diff_keys = [\n 'interval_mean',\n 'rhythm_variability',\n 'rhythm_density',\n 'interval_signs',\n 'pitch_mean',\n 'interval_prop_small',\n 'interval_prop_large'\n ]\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n\n # songScore = 
songs[motif['songName']]['score'].flat.notes.stream()\n# songScoreNums = [x.pitch.midi for x in songScore]\n\n# vec['intervalFollowing'] = 0\n# if motif['endInd'] + 1 < len(songScoreNums):\n# vec['intervalFollowing'] = songScoreNums[motif['endInd'] + 1] - noteNums[-1]\n# vec['intervalPreceding'] = 0\n# if motif['endInd'] - 1 > 0:\n# vec['intervalPreceding'] = songScoreNums[motif['endInd'] - 1] - noteNums[0]\n\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)\n vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0\n\n # figure out how to tell if note has associated time signature\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])\n except m21.Music21ObjectException:\n # this is not a good solution.\n pass\n\n # send it back\n return vec\n\n\ndef getFeaturesForClasses(patternClass, occs, songs):\n # take the average/std over all occurrences\n vec = {}\n\n vec['numOccs'] = len(patternClass.occNames)\n\n occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()\n\n for fk in occFeatureKeys:\n allOccVals = [occs[occName].occFeatures[fk] for occName in patternClass.occNames]\n vec[\"avg_\" + fk] = np.mean(allOccVals)\n vec[\"std_\" + fk] = np.std(allOccVals)\n\n scores = [occs[oc].score.flat for oc in patternClass.occNames]\n\n noteNums = [[x.pitch.midi for x in mel] for mel in scores]\n noteDurs = 
[[round(float(x.quarterLength), ROUND_DURS_DIGITS)\n for x in mel] for mel in scores]\n\n flatNums = [x for subList in noteNums for x in subList]\n vec['num_notes_total'] = len(flatNums)\n\n vec['unique_pitch_prop_content'] = \\\n len(set(tuple(x) for x in noteNums)) / vec['numOccs']\n\n vec['unique_rhythm_prop_content'] = \\\n len(set(tuple(x) for x in noteDurs)) / vec['numOccs']\n\n pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec['numOccs'])]\n\n vec['prop_unique_content'] = \\\n len(set(tuple(x) for x in pitchAndDurs)) / vec['numOccs']\n\n return vec\n\n\ndef filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):\n # so: we want to take a sample of our huge number of generated pattern classes\n # such that the number of occurrences and average cardinality doesn't easily\n # distinguish our sample from the annotated group.\n # perform a quick and dirty knn to get a bunch of generated class names\n # whose cardinalities and numOccs somewhat match the annotated data.\n indexPairs = np.arange(len(annPClassNames))\n indexPairs = np.concatenate([indexPairs, indexPairs])\n np.random.shuffle(indexPairs)\n indexPairs = np.split(indexPairs, len(indexPairs)/2)\n\n # deep copy!\n genPClassNamesCopy = list(genPClassNames)\n filtGenPClassNames = []\n\n for i in range(len(annPClassNames)):\n\n tar1 = pClasses[annPClassNames[indexPairs[i][0]]]\n tar2 = pClasses[annPClassNames[indexPairs[i][1]]]\n\n tarNumOccs = len(tar1.occNames)\n tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]\n tarNumNotes = np.mean(tar2Notes)\n\n candidateNameList = []\n\n # calculate how close each generated class is to these parameters\n for gcn in genPClassNamesCopy:\n cand = pClasses[gcn]\n candNumOccs = len(cand.occNames)\n candNotes = [len(pOccs[on].score) for on in cand.occNames]\n candNumNotes = np.mean(candNotes)\n\n candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2\n\n candidateNameList.append([candScore, gcn])\n\n # 
from the kNearest closest generated classes, choose one and remove\n # that one from the copy array\n candidateNameList = sorted(candidateNameList, key=lambda x: x[0])\n chop = candidateNameList[0:kNearest]\n choice = chop[np.random.choice(kNearest)][1]\n filtGenPClassNames.append(choice)\n genPClassNamesCopy.remove(choice)\n\n return filtGenPClassNames\n\n\ndef split_into_chunks(inp, num_chunks):\n\n chunk_len = int(np.floor(len(inp) / num_chunks))\n chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]\n if len(chunks) > num_chunks:\n for i, x in enumerate(chunks[num_chunks]):\n chunks[i].append(x)\n del chunks[num_chunks]\n\n return chunks\n\n\n# just for testing: get all features\n# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))\ndef inspectFeature(featureName, table, tableNames, featsType=\"classFeatures\"):\n ret = []\n for tn in tableNames:\n item = table[tn]\n ret.append(item[featsType][featureName])\n return ret\n\n\ndef scatterFeatures(fn1, fn2, table, tableNames):\n\n xs = []\n ys = []\n types = []\n\n for tn in tableNames:\n item = table[tn]\n xs.append(item.classFeatures[fn1])\n ys.append(item.classFeatures[fn2])\n if item['type'] == 'ann':\n types.append('r')\n else:\n types.append('k')\n\n print(types)\n\n plt.scatter(xs, ys, c=types)\n plt.xlabel(fn1)\n plt.ylabel(fn2)\n plt.show()\n return\n",
"step-ids": [
3,
8,
9,
10,
11
]
}
|
[
3,
8,
9,
10,
11
] |
from pandas_datareader import data as pdr
from datetime import date
class YahooHelper:
    """
    Helper that downloads Yahoo Finance stock data for a NASDAQ symbol
    and exports it, with a descriptive header, to a CSV file.
    """

    def __init__(self):
        """
        Default constructor which initiates object.

        The helper holds no state until get_data() caches a DataFrame
        on ``self.data``.
        """
        pass

    def get_data(self, symbol):
        """
        Function to collect stock market data.

        :param symbol: The Symbol used to identify
                       an NASDAQ-100 stock.
        """
        # Collect stock market data and cache it for export_data().
        self.data = self.get_stock_data(symbol)

    # Symbol lookup:
    @staticmethod
    def get_stock_data(symbol):
        """
        Function to get stock data for current year by ticker symbol.

        Declared as a staticmethod: the original definition took only
        ``symbol`` yet was called through the instance, which passed
        ``self`` as ``symbol`` and raised a TypeError.

        :param symbol: The Symbol used to identify
                       an NASDAQ-100 stock.
        :return: Stock data for the current year (pandas DataFrame,
                 one row per trading day).
        """
        # Date range: January 1st of the current year through today.
        start = date(date.today().year, 1, 1)
        end = date.today()

        # Fetch the raw Yahoo data.
        data = pdr.get_data_yahoo(symbol, start=start, end=end)

        # Rename columns to human-readable labels.
        data.columns = ["Highest price (USD)",
                        "Lowest price (USD)",
                        "Opening price (USD)",
                        "Closing price (USD)",
                        "Volume",
                        "Adjusted closing price (USD)"]

        return data

    # Export data to csv
    def export_data(self):
        """
        Function to extract stock data to csv.

        Writes ``../data/yahoodata.csv``: a commented header block
        followed by the cached DataFrame.  The original implementation
        first wrote the bare CSV and then immediately overwrote it with
        the header version; the redundant first write was removed.
        """
        # Header information prepended to the CSV payload.
        template = (
            "# TSLA Stocks over time \n"
            "# --------------------------------------------------------------------- \n"
            "# Export of stock data of \"Tesla Inc.\" for current year. The dataset\n"
            "# consists of selected key stock exchange figures on a daily basis. \n"
            "# The data can be recreated at any time with the \"load_data.py\"-script.\n"
            "# The data record contains one record sorted per trading day. \n"
            "#\n"
            "# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \n"
            "# the company Tesla Inc. The stock information was limited to the period \n"
            "# from 1st January to the current day of the year. \n"
            "#\n"
            "# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n"
            "# December, 26, 2018, Marco Romanutti \n"
            "#\n"
            "#\n"
            "{}"
        )

        with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:
            fp.write(template.format(self.data.to_csv(index=True, encoding='utf-8')))
|
normal
|
{
"blob_id": "b4b4dad5cf630dc1a627e323ea63577583d1e1c3",
"index": 1551,
"step-1": "<mask token>\n\n\nclass YahooHelper:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n <mask token>\n\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n start = date(date.today().year, 1, 1)\n end = date.today()\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n data.columns = ['Highest price (USD)', 'Lowest price (USD)',\n 'Opening price (USD)', 'Closing price (USD)', 'Volume',\n 'Adjusted closing price (USD)']\n return data\n\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding=\n 'utf-8')\n template = ('# TSLA Stocks over time \\n' +\n \"\"\"# --------------------------------------------------------------------- \n\"\"\"\n +\n '# Export of stock data of \"Tesla Inc.\" for current year. The dataset\\n'\n +\n \"\"\"# consists of selected key stock exchange figures on a daily basis. \n\"\"\"\n +\n '# The data can be recreated at any time with the \"load_data.py\"-script.\\n'\n +\n \"\"\"# The data record contains one record sorted per trading day. \n\"\"\"\n + '#\\n' +\n '# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \\n'\n +\n \"\"\"# the company Tesla Inc. The stock information was limited to the period \n\"\"\"\n + '# from 1st January to the current day of the year. \\n' +\n '#\\n' +\n \"\"\"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n\"\"\"\n + '# December, 26, 2018, Marco Romanutti \\n' + '#\\n' + '#\\n' +\n '{}')\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding=\n 'utf-8')))\n",
"step-2": "<mask token>\n\n\nclass YahooHelper:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n\n def get_data(self, symbol):\n \"\"\"\n Function to collect Twitter data.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n \"\"\"\n self.data = self.get_stock_data(symbol)\n\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n start = date(date.today().year, 1, 1)\n end = date.today()\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n data.columns = ['Highest price (USD)', 'Lowest price (USD)',\n 'Opening price (USD)', 'Closing price (USD)', 'Volume',\n 'Adjusted closing price (USD)']\n return data\n\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding=\n 'utf-8')\n template = ('# TSLA Stocks over time \\n' +\n \"\"\"# --------------------------------------------------------------------- \n\"\"\"\n +\n '# Export of stock data of \"Tesla Inc.\" for current year. The dataset\\n'\n +\n \"\"\"# consists of selected key stock exchange figures on a daily basis. \n\"\"\"\n +\n '# The data can be recreated at any time with the \"load_data.py\"-script.\\n'\n +\n \"\"\"# The data record contains one record sorted per trading day. \n\"\"\"\n + '#\\n' +\n '# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \\n'\n +\n \"\"\"# the company Tesla Inc. The stock information was limited to the period \n\"\"\"\n + '# from 1st January to the current day of the year. 
\\n' +\n '#\\n' +\n \"\"\"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n\"\"\"\n + '# December, 26, 2018, Marco Romanutti \\n' + '#\\n' + '#\\n' +\n '{}')\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding=\n 'utf-8')))\n",
"step-3": "<mask token>\n\n\nclass YahooHelper:\n \"\"\"\n Class to fetch Yahoo data\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n\n def get_data(self, symbol):\n \"\"\"\n Function to collect Twitter data.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n \"\"\"\n self.data = self.get_stock_data(symbol)\n\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n start = date(date.today().year, 1, 1)\n end = date.today()\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n data.columns = ['Highest price (USD)', 'Lowest price (USD)',\n 'Opening price (USD)', 'Closing price (USD)', 'Volume',\n 'Adjusted closing price (USD)']\n return data\n\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding=\n 'utf-8')\n template = ('# TSLA Stocks over time \\n' +\n \"\"\"# --------------------------------------------------------------------- \n\"\"\"\n +\n '# Export of stock data of \"Tesla Inc.\" for current year. The dataset\\n'\n +\n \"\"\"# consists of selected key stock exchange figures on a daily basis. \n\"\"\"\n +\n '# The data can be recreated at any time with the \"load_data.py\"-script.\\n'\n +\n \"\"\"# The data record contains one record sorted per trading day. \n\"\"\"\n + '#\\n' +\n '# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \\n'\n +\n \"\"\"# the company Tesla Inc. The stock information was limited to the period \n\"\"\"\n + '# from 1st January to the current day of the year. 
\\n' +\n '#\\n' +\n \"\"\"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n\"\"\"\n + '# December, 26, 2018, Marco Romanutti \\n' + '#\\n' + '#\\n' +\n '{}')\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding=\n 'utf-8')))\n",
"step-4": "from pandas_datareader import data as pdr\nfrom datetime import date\n\n\nclass YahooHelper:\n \"\"\"\n Class to fetch Yahoo data\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n\n def get_data(self, symbol):\n \"\"\"\n Function to collect Twitter data.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n \"\"\"\n self.data = self.get_stock_data(symbol)\n\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n start = date(date.today().year, 1, 1)\n end = date.today()\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n data.columns = ['Highest price (USD)', 'Lowest price (USD)',\n 'Opening price (USD)', 'Closing price (USD)', 'Volume',\n 'Adjusted closing price (USD)']\n return data\n\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding=\n 'utf-8')\n template = ('# TSLA Stocks over time \\n' +\n \"\"\"# --------------------------------------------------------------------- \n\"\"\"\n +\n '# Export of stock data of \"Tesla Inc.\" for current year. The dataset\\n'\n +\n \"\"\"# consists of selected key stock exchange figures on a daily basis. \n\"\"\"\n +\n '# The data can be recreated at any time with the \"load_data.py\"-script.\\n'\n +\n \"\"\"# The data record contains one record sorted per trading day. \n\"\"\"\n + '#\\n' +\n '# The data is restricted to the NASDAQ symbol \"TSLA\" which represents \\n'\n +\n \"\"\"# the company Tesla Inc. The stock information was limited to the period \n\"\"\"\n + '# from 1st January to the current day of the year. 
\\n' +\n '#\\n' +\n \"\"\"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \n\"\"\"\n + '# December, 26, 2018, Marco Romanutti \\n' + '#\\n' + '#\\n' +\n '{}')\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding=\n 'utf-8')))\n",
"step-5": "from pandas_datareader import data as pdr\nfrom datetime import date\n\n\nclass YahooHelper:\n \"\"\"\n Class to fetch Yahoo data\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Default constructor which initiates object\n \"\"\"\n pass\n\n def get_data(self, symbol):\n \"\"\"\n Function to collect Twitter data.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n \"\"\"\n # Collect stock market data\n self.data = self.get_stock_data(symbol)\n\n # Symbol lookup:\n def get_stock_data(symbol):\n \"\"\"\n Function to get stock data for current year by ticker symbol.\n\n :param symbol: The Symbol used to identify\n an NASDAQ-100 stock.\n :return: Stock data for current year\n \"\"\"\n # Set current dates\n start = date(date.today().year, 1, 1) # first of current year\n end = date.today() # today\n\n # Get yahoo Yahoo data\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n\n # Rename columns\n data.columns = [\"Highest price (USD)\",\n \"Lowest price (USD)\",\n \"Opening price (USD)\",\n \"Closing price (USD)\",\n \"Volume\",\n \"Adjusted closing price (USD)\"]\n\n return data\n\n # Export data to csv\n def export_data(self):\n \"\"\"\n Function to extract stock data to csv.\n \"\"\"\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding='utf-8')\n # Header information\n template = \"# TSLA Stocks over time \\n\" + \\\n \"# --------------------------------------------------------------------- \\n\" + \\\n \"# Export of stock data of \\\"Tesla Inc.\\\" for current year. The dataset\\n\" + \\\n \"# consists of selected key stock exchange figures on a daily basis. \\n\" + \\\n \"# The data can be recreated at any time with the \\\"load_data.py\\\"-script.\\n\" + \\\n \"# The data record contains one record sorted per trading day. 
\\n\" + \\\n \"#\\n\" + \\\n \"# The data is restricted to the NASDAQ symbol \\\"TSLA\\\" which represents \\n\" + \\\n \"# the company Tesla Inc. The stock information was limited to the period \\n\" + \\\n \"# from 1st January to the current day of the year. \\n\" + \\\n \"#\\n\" + \\\n \"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \\n\" + \\\n \"# December, 26, 2018, Marco Romanutti \\n\" + \\\n \"#\\n\" + \\\n \"#\\n\" + \\\n \"{}\"\"\"\n\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding='utf-8')))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import functools
import multiprocessing
import os
import time

import psycopg2
#os.system("myproject1\\runscrapy2.py")
#from scrapy import cmdline
#os.system("scrapy crawl parts")
#cmdline.execute("cd myproject1".split())
#cmdline.execute("myproject1\\runscrapy.bat".split())
# start = time.perf_counter()
# One-time database preparation: (re)create the scrape target table.
# Guarded by __name__ so that Windows "spawn" worker processes, which
# re-import this module, do not drop and recreate the table once per
# worker; try/finally ensures the cursor and connection are released
# even if a statement fails.
if __name__ == '__main__':
    connection = psycopg2.connect(
        host="localhost",
        user="postgres",
        database="SCRAPY_DB",
        password="yolo12",
    )
    try:
        cursor = connection.cursor()
        try:
            cursor.execute("DROP TABLE IF EXISTS aa_otoyedekcim")
            cursor.execute(
                "CREATE TABLE IF NOT EXISTS aa_otoyedekcim "
                "(part_no TEXT, description TEXT, price TEXT, cur_url TEXT)"
            )
            connection.commit()
        finally:
            cursor.close()
    finally:
        connection.close()
# Batch-file launcher callables, one per scraper shard.
#
# The original file hand-wrote one trivial wrapper function per batch file
# and the copy/paste drifted: urls_20/29/38/47/56 were each defined twice,
# several wrappers were shifted by one (e.g. urls_20 ran urls_21.bat), and
# some batch files (urls_20.bat, urls_29.bat, ...) became unreachable.
# Generating the wrappers guarantees urls_<N> always runs urls_<N>.bat.
#
# functools.partial objects pickle cleanly (os.system is pickled by name,
# the filename by value), which the Windows "spawn" start method of
# multiprocessing requires for Process targets; nested closures would not.
urls = functools.partial(os.system, 'urls.bat')
for _shard in range(1, 66):
    globals()['urls_{}'.format(_shard)] = functools.partial(
        os.system, 'urls_{}.bat'.format(_shard))
del _shard
# Build one worker Process per batch wrapper: u1 -> urls, u2 -> urls_1,
# ..., u66 -> urls_65.  The original hand-written list assigned u16 twice
# (first to urls_15, then to urls_16), so urls_15 never got a process;
# generating the assignments removes that class of mistake.
# Creating (but not starting) Process objects at import time is safe:
# spawn-children recreate them on re-import but never start them.
_batch_targets = [urls] + [globals()['urls_{}'.format(i)] for i in range(1, 66)]
for _slot, _target in enumerate(_batch_targets, start=1):
    globals()['u{}'.format(_slot)] = multiprocessing.Process(target=_target)
del _batch_targets, _slot, _target
if __name__ == '__main__':
    # Launch the first wave of scrapers (u1..u22) in parallel, then wait
    # for all of them before moving on.
    first_wave = [u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11,
                  u12, u13, u14, u15, u16, u17, u18, u19, u20, u21, u22]
    for worker in first_wave:
        worker.start()
    for worker in first_wave:
        worker.join()
    # Follow-up pipeline script.  Must stay under the __main__ guard: at
    # module top level it would be re-executed by every spawned child
    # process on Windows.
    os.system('1_runmebaby_3.bat')
# Dead code: the second and third scraper waves (u23..u44 and u45..u65)
# were disabled by wrapping them in a bare module-level string literal.
# The string is evaluated and discarded at import time (a no-op); it is
# kept only as a reference for re-enabling the remaining batches.
'''
if __name__ == '__main__':
    u23.start()
    u24.start()
    u25.start()
    u26.start()
    u27.start()
    u28.start()
    u29.start()
    u30.start()
    u31.start()
    u32.start()
    u33.start()
    u34.start()
    u35.start()
    u36.start()
    u37.start()
    u38.start()
    u39.start()
    u40.start()
    u41.start()
    u42.start()
    u43.start()
    u44.start()
    u23.join()
    u24.join()
    u25.join()
    u26.join()
    u27.join()
    u28.join()
    u29.join()
    u30.join()
    u31.join()
    u32.join()
    u33.join()
    u34.join()
    u35.join()
    u36.join()
    u37.join()
    u38.join()
    u39.join()
    u40.join()
    u41.join()
    u42.join()
    u43.join()
    u44.join()
if __name__ == '__main__':
    u45.start()
    u46.start()
    u47.start()
    u48.start()
    u49.start()
    u50.start()
    u51.start()
    u52.start()
    u53.start()
    u54.start()
    u55.start()
    u56.start()
    u57.start()
    u58.start()
    u59.start()
    u60.start()
    u61.start()
    u62.start()
    u63.start()
    u64.start()
    u65.start()
    u45.join()
    u46.join()
    u47.join()
    u48.join()
    u49.join()
    u50.join()
    u51.join()
    u52.join()
    u53.join()
    u54.join()
    u55.join()
    u56.join()
    u57.join()
    u58.join()
    u59.join()
    u60.join()
    u61.join()
    u62.join()
    u63.join()
    u64.join()
    u65.join()
# finish = time.perf_counter()
# print(f'Successfully finished in {round((finish-start)/60, 2)} minute(s) bro')
'''
|
normal
|
{
"blob_id": "8e0d729fa55aabede123d89a507296b7d8a45c8b",
"index": 1705,
"step-1": "<mask token>\n\n\ndef urls():\n os.system('urls.bat')\n\n\ndef urls_1():\n os.system('urls_1.bat')\n\n\n<mask token>\n\n\ndef urls_3():\n os.system('urls_3.bat')\n\n\ndef urls_4():\n os.system('urls_4.bat')\n\n\ndef urls_5():\n os.system('urls_5.bat')\n\n\ndef urls_6():\n os.system('urls_6.bat')\n\n\ndef urls_7():\n os.system('urls_7.bat')\n\n\ndef urls_8():\n os.system('urls_8.bat')\n\n\n<mask token>\n\n\ndef urls_11():\n os.system('urls_11.bat')\n\n\n<mask token>\n\n\ndef urls_13():\n os.system('urls_13.bat')\n\n\ndef urls_14():\n os.system('urls_14.bat')\n\n\ndef urls_15():\n os.system('urls_15.bat')\n\n\n<mask token>\n\n\ndef urls_17():\n os.system('urls_17.bat')\n\n\ndef urls_18():\n os.system('urls_18.bat')\n\n\ndef urls_19():\n os.system('urls_19.bat')\n\n\n<mask token>\n\n\ndef urls_21():\n os.system('urls_22.bat')\n\n\n<mask token>\n\n\ndef urls_23():\n os.system('urls_23.bat')\n\n\ndef urls_24():\n os.system('urls_24.bat')\n\n\n<mask token>\n\n\ndef urls_29():\n os.system('urls_29.bat')\n\n\n<mask token>\n\n\ndef urls_31():\n os.system('urls_32.bat')\n\n\ndef urls_32():\n os.system('urls_32.bat')\n\n\ndef urls_33():\n os.system('urls_33.bat')\n\n\n<mask token>\n\n\ndef urls_35():\n os.system('urls_35.bat')\n\n\ndef urls_36():\n os.system('urls_36.bat')\n\n\ndef urls_37():\n os.system('urls_37.bat')\n\n\n<mask token>\n\n\ndef urls_38():\n os.system('urls_39.bat')\n\n\n<mask token>\n\n\ndef urls_40():\n os.system('urls_41.bat')\n\n\n<mask token>\n\n\ndef urls_44():\n os.system('urls_44.bat')\n\n\n<mask token>\n\n\ndef urls_46():\n os.system('urls_46.bat')\n\n\n<mask token>\n\n\ndef urls_47():\n os.system('urls_48.bat')\n\n\ndef urls_48():\n os.system('urls_49.bat')\n\n\n<mask token>\n\n\ndef urls_52():\n os.system('urls_52.bat')\n\n\n<mask token>\n\n\ndef urls_55():\n os.system('urls_55.bat')\n\n\n<mask token>\n\n\ndef urls_57():\n os.system('urls_58.bat')\n\n\ndef urls_58():\n os.system('urls_59.bat')\n\n\n<mask token>\n\n\ndef urls_61():\n 
os.system('urls_61.bat')\n\n\ndef urls_62():\n os.system('urls_62.bat')\n\n\ndef urls_63():\n os.system('urls_63.bat')\n\n\ndef urls_64():\n os.system('urls_64.bat')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef urls():\n os.system('urls.bat')\n\n\ndef urls_1():\n os.system('urls_1.bat')\n\n\n<mask token>\n\n\ndef urls_3():\n os.system('urls_3.bat')\n\n\ndef urls_4():\n os.system('urls_4.bat')\n\n\ndef urls_5():\n os.system('urls_5.bat')\n\n\ndef urls_6():\n os.system('urls_6.bat')\n\n\ndef urls_7():\n os.system('urls_7.bat')\n\n\ndef urls_8():\n os.system('urls_8.bat')\n\n\ndef urls_9():\n os.system('urls_9.bat')\n\n\n<mask token>\n\n\ndef urls_11():\n os.system('urls_11.bat')\n\n\ndef urls_12():\n os.system('urls_12.bat')\n\n\ndef urls_13():\n os.system('urls_13.bat')\n\n\ndef urls_14():\n os.system('urls_14.bat')\n\n\ndef urls_15():\n os.system('urls_15.bat')\n\n\ndef urls_16():\n os.system('urls_16.bat')\n\n\ndef urls_17():\n os.system('urls_17.bat')\n\n\ndef urls_18():\n os.system('urls_18.bat')\n\n\ndef urls_19():\n os.system('urls_19.bat')\n\n\ndef urls_20():\n os.system('urls_20.bat')\n\n\n<mask token>\n\n\ndef urls_21():\n os.system('urls_22.bat')\n\n\n<mask token>\n\n\ndef urls_23():\n os.system('urls_23.bat')\n\n\ndef urls_24():\n os.system('urls_24.bat')\n\n\ndef urls_25():\n os.system('urls_25.bat')\n\n\ndef urls_26():\n os.system('urls_26.bat')\n\n\ndef urls_27():\n os.system('urls_27.bat')\n\n\n<mask token>\n\n\ndef urls_29():\n os.system('urls_29.bat')\n\n\n<mask token>\n\n\ndef urls_31():\n os.system('urls_32.bat')\n\n\ndef urls_32():\n os.system('urls_32.bat')\n\n\ndef urls_33():\n os.system('urls_33.bat')\n\n\ndef urls_34():\n os.system('urls_34.bat')\n\n\ndef urls_35():\n os.system('urls_35.bat')\n\n\ndef urls_36():\n os.system('urls_36.bat')\n\n\ndef urls_37():\n os.system('urls_37.bat')\n\n\ndef urls_38():\n os.system('urls_38.bat')\n\n\ndef urls_38():\n os.system('urls_39.bat')\n\n\n<mask token>\n\n\ndef urls_40():\n os.system('urls_41.bat')\n\n\ndef urls_41():\n os.system('urls_41.bat')\n\n\n<mask token>\n\n\ndef urls_44():\n os.system('urls_44.bat')\n\n\n<mask token>\n\n\ndef urls_46():\n 
os.system('urls_46.bat')\n\n\ndef urls_47():\n os.system('urls_47.bat')\n\n\ndef urls_47():\n os.system('urls_48.bat')\n\n\ndef urls_48():\n os.system('urls_49.bat')\n\n\n<mask token>\n\n\ndef urls_51():\n os.system('urls_51.bat')\n\n\ndef urls_52():\n os.system('urls_52.bat')\n\n\ndef urls_53():\n os.system('urls_53.bat')\n\n\n<mask token>\n\n\ndef urls_55():\n os.system('urls_55.bat')\n\n\n<mask token>\n\n\ndef urls_56():\n os.system('urls_57.bat')\n\n\ndef urls_57():\n os.system('urls_58.bat')\n\n\ndef urls_58():\n os.system('urls_59.bat')\n\n\n<mask token>\n\n\ndef urls_61():\n os.system('urls_61.bat')\n\n\ndef urls_62():\n os.system('urls_62.bat')\n\n\ndef urls_63():\n os.system('urls_63.bat')\n\n\ndef urls_64():\n os.system('urls_64.bat')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef urls():\n os.system('urls.bat')\n\n\ndef urls_1():\n os.system('urls_1.bat')\n\n\n<mask token>\n\n\ndef urls_3():\n os.system('urls_3.bat')\n\n\ndef urls_4():\n os.system('urls_4.bat')\n\n\ndef urls_5():\n os.system('urls_5.bat')\n\n\ndef urls_6():\n os.system('urls_6.bat')\n\n\ndef urls_7():\n os.system('urls_7.bat')\n\n\ndef urls_8():\n os.system('urls_8.bat')\n\n\ndef urls_9():\n os.system('urls_9.bat')\n\n\n<mask token>\n\n\ndef urls_11():\n os.system('urls_11.bat')\n\n\ndef urls_12():\n os.system('urls_12.bat')\n\n\ndef urls_13():\n os.system('urls_13.bat')\n\n\ndef urls_14():\n os.system('urls_14.bat')\n\n\ndef urls_15():\n os.system('urls_15.bat')\n\n\ndef urls_16():\n os.system('urls_16.bat')\n\n\ndef urls_17():\n os.system('urls_17.bat')\n\n\ndef urls_18():\n os.system('urls_18.bat')\n\n\ndef urls_19():\n os.system('urls_19.bat')\n\n\ndef urls_20():\n os.system('urls_20.bat')\n\n\n<mask token>\n\n\ndef urls_21():\n os.system('urls_22.bat')\n\n\n<mask token>\n\n\ndef urls_23():\n os.system('urls_23.bat')\n\n\ndef urls_24():\n os.system('urls_24.bat')\n\n\ndef urls_25():\n os.system('urls_25.bat')\n\n\ndef urls_26():\n os.system('urls_26.bat')\n\n\ndef urls_27():\n os.system('urls_27.bat')\n\n\ndef urls_28():\n os.system('urls_28.bat')\n\n\ndef urls_29():\n os.system('urls_29.bat')\n\n\n<mask token>\n\n\ndef urls_31():\n os.system('urls_32.bat')\n\n\ndef urls_32():\n os.system('urls_32.bat')\n\n\ndef urls_33():\n os.system('urls_33.bat')\n\n\ndef urls_34():\n os.system('urls_34.bat')\n\n\ndef urls_35():\n os.system('urls_35.bat')\n\n\ndef urls_36():\n os.system('urls_36.bat')\n\n\ndef urls_37():\n os.system('urls_37.bat')\n\n\ndef urls_38():\n os.system('urls_38.bat')\n\n\ndef urls_38():\n os.system('urls_39.bat')\n\n\n<mask token>\n\n\ndef urls_40():\n os.system('urls_41.bat')\n\n\ndef urls_41():\n os.system('urls_41.bat')\n\n\n<mask token>\n\n\ndef urls_43():\n os.system('urls_43.bat')\n\n\ndef urls_44():\n 
os.system('urls_44.bat')\n\n\ndef urls_45():\n os.system('urls_45.bat')\n\n\ndef urls_46():\n os.system('urls_46.bat')\n\n\ndef urls_47():\n os.system('urls_47.bat')\n\n\ndef urls_47():\n os.system('urls_48.bat')\n\n\ndef urls_48():\n os.system('urls_49.bat')\n\n\n<mask token>\n\n\ndef urls_51():\n os.system('urls_51.bat')\n\n\ndef urls_52():\n os.system('urls_52.bat')\n\n\ndef urls_53():\n os.system('urls_53.bat')\n\n\ndef urls_54():\n os.system('urls_54.bat')\n\n\ndef urls_55():\n os.system('urls_55.bat')\n\n\ndef urls_56():\n os.system('urls_56.bat')\n\n\ndef urls_56():\n os.system('urls_57.bat')\n\n\ndef urls_57():\n os.system('urls_58.bat')\n\n\ndef urls_58():\n os.system('urls_59.bat')\n\n\n<mask token>\n\n\ndef urls_60():\n os.system('urls_60.bat')\n\n\ndef urls_61():\n os.system('urls_61.bat')\n\n\ndef urls_62():\n os.system('urls_62.bat')\n\n\ndef urls_63():\n os.system('urls_63.bat')\n\n\ndef urls_64():\n os.system('urls_64.bat')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef urls():\n os.system('urls.bat')\n\n\ndef urls_1():\n os.system('urls_1.bat')\n\n\n<mask token>\n\n\ndef urls_3():\n os.system('urls_3.bat')\n\n\ndef urls_4():\n os.system('urls_4.bat')\n\n\ndef urls_5():\n os.system('urls_5.bat')\n\n\ndef urls_6():\n os.system('urls_6.bat')\n\n\ndef urls_7():\n os.system('urls_7.bat')\n\n\ndef urls_8():\n os.system('urls_8.bat')\n\n\ndef urls_9():\n os.system('urls_9.bat')\n\n\n<mask token>\n\n\ndef urls_11():\n os.system('urls_11.bat')\n\n\ndef urls_12():\n os.system('urls_12.bat')\n\n\ndef urls_13():\n os.system('urls_13.bat')\n\n\ndef urls_14():\n os.system('urls_14.bat')\n\n\ndef urls_15():\n os.system('urls_15.bat')\n\n\ndef urls_16():\n os.system('urls_16.bat')\n\n\ndef urls_17():\n os.system('urls_17.bat')\n\n\ndef urls_18():\n os.system('urls_18.bat')\n\n\ndef urls_19():\n os.system('urls_19.bat')\n\n\ndef urls_20():\n os.system('urls_20.bat')\n\n\n<mask token>\n\n\ndef urls_21():\n os.system('urls_22.bat')\n\n\ndef urls_22():\n os.system('urls_23.bat')\n\n\ndef urls_23():\n os.system('urls_23.bat')\n\n\ndef urls_24():\n os.system('urls_24.bat')\n\n\ndef urls_25():\n os.system('urls_25.bat')\n\n\ndef urls_26():\n os.system('urls_26.bat')\n\n\ndef urls_27():\n os.system('urls_27.bat')\n\n\ndef urls_28():\n os.system('urls_28.bat')\n\n\ndef urls_29():\n os.system('urls_29.bat')\n\n\ndef urls_29():\n os.system('urls_30.bat')\n\n\ndef urls_30():\n os.system('urls_31.bat')\n\n\ndef urls_31():\n os.system('urls_32.bat')\n\n\ndef urls_32():\n os.system('urls_32.bat')\n\n\ndef urls_33():\n os.system('urls_33.bat')\n\n\ndef urls_34():\n os.system('urls_34.bat')\n\n\ndef urls_35():\n os.system('urls_35.bat')\n\n\ndef urls_36():\n os.system('urls_36.bat')\n\n\ndef urls_37():\n os.system('urls_37.bat')\n\n\ndef urls_38():\n os.system('urls_38.bat')\n\n\ndef urls_38():\n os.system('urls_39.bat')\n\n\n<mask token>\n\n\ndef urls_40():\n os.system('urls_41.bat')\n\n\ndef urls_41():\n 
os.system('urls_41.bat')\n\n\ndef urls_42():\n os.system('urls_42.bat')\n\n\ndef urls_43():\n os.system('urls_43.bat')\n\n\ndef urls_44():\n os.system('urls_44.bat')\n\n\ndef urls_45():\n os.system('urls_45.bat')\n\n\ndef urls_46():\n os.system('urls_46.bat')\n\n\ndef urls_47():\n os.system('urls_47.bat')\n\n\ndef urls_47():\n os.system('urls_48.bat')\n\n\ndef urls_48():\n os.system('urls_49.bat')\n\n\ndef urls_49():\n os.system('urls_50.bat')\n\n\ndef urls_50():\n os.system('urls_50.bat')\n\n\ndef urls_51():\n os.system('urls_51.bat')\n\n\ndef urls_52():\n os.system('urls_52.bat')\n\n\ndef urls_53():\n os.system('urls_53.bat')\n\n\ndef urls_54():\n os.system('urls_54.bat')\n\n\ndef urls_55():\n os.system('urls_55.bat')\n\n\ndef urls_56():\n os.system('urls_56.bat')\n\n\ndef urls_56():\n os.system('urls_57.bat')\n\n\ndef urls_57():\n os.system('urls_58.bat')\n\n\ndef urls_58():\n os.system('urls_59.bat')\n\n\ndef urls_59():\n os.system('urls_59.bat')\n\n\ndef urls_60():\n os.system('urls_60.bat')\n\n\ndef urls_61():\n os.system('urls_61.bat')\n\n\ndef urls_62():\n os.system('urls_62.bat')\n\n\ndef urls_63():\n os.system('urls_63.bat')\n\n\ndef urls_64():\n os.system('urls_64.bat')\n\n\ndef urls_65():\n os.system('urls_65.bat')\n\n\n<mask token>\n",
"step-5": "import os\nimport multiprocessing\nimport time\nimport psycopg2\n#os.system(\"myproject1\\\\runscrapy2.py\")\n\n#from scrapy import cmdline\n#os.system(\"scrapy crawl parts\")\n#cmdline.execute(\"cd myproject1\".split())\n#cmdline.execute(\"myproject1\\\\runscrapy.bat\".split())\n\n# start = time.perf_counter()\n\n\n\nconnection = psycopg2.connect(\n host=\"localhost\",\n user=\"postgres\",\n database=\"SCRAPY_DB\",\n password=\"yolo12\",\n)\ncursor = connection.cursor()\n\n\ncursor.execute(\"DROP TABLE IF EXISTS aa_otoyedekcim\")\ncursor.execute(\"CREATE TABLE IF NOT EXISTS aa_otoyedekcim (part_no TEXT, description TEXT, price TEXT, cur_url TEXT)\")\nconnection.commit()\nconnection.close()\n\n\n\n\n\ndef urls():\n os.system('urls.bat')\n\ndef urls_1():\n os.system('urls_1.bat')\n\ndef urls_2():\n os.system('urls_2.bat')\n\ndef urls_3():\n os.system('urls_3.bat')\n\ndef urls_4():\n os.system('urls_4.bat')\n\ndef urls_5():\n os.system('urls_5.bat')\n\ndef urls_6():\n os.system('urls_6.bat')\n\ndef urls_7():\n os.system('urls_7.bat')\n\ndef urls_8():\n os.system('urls_8.bat')\n\ndef urls_9():\n os.system('urls_9.bat')\n\ndef urls_10():\n os.system('urls_10.bat')\n\ndef urls_11():\n os.system('urls_11.bat')\n\ndef urls_12():\n os.system('urls_12.bat')\n\ndef urls_13():\n os.system('urls_13.bat')\n\ndef urls_14():\n os.system('urls_14.bat')\n\ndef urls_15():\n os.system('urls_15.bat')\n\ndef urls_16():\n os.system('urls_16.bat')\n\ndef urls_17():\n os.system('urls_17.bat')\n\ndef urls_18():\n os.system('urls_18.bat')\n\ndef urls_19():\n os.system('urls_19.bat')\n\ndef urls_20():\n os.system('urls_20.bat')\n\ndef urls_20():\n os.system('urls_21.bat')\n\ndef urls_21():\n os.system('urls_22.bat')\n\ndef urls_22():\n os.system('urls_23.bat')\n\ndef urls_23():\n os.system('urls_23.bat')\n\ndef urls_24():\n os.system('urls_24.bat')\n\ndef urls_25():\n os.system('urls_25.bat')\n\ndef urls_26():\n os.system('urls_26.bat')\n\ndef urls_27():\n 
os.system('urls_27.bat')\n\ndef urls_28():\n os.system('urls_28.bat')\n\ndef urls_29():\n os.system('urls_29.bat')\n\ndef urls_29():\n os.system('urls_30.bat')\n\ndef urls_30():\n os.system('urls_31.bat')\n\ndef urls_31():\n os.system('urls_32.bat')\n\ndef urls_32():\n os.system('urls_32.bat')\n\ndef urls_33():\n os.system('urls_33.bat')\n\ndef urls_34():\n os.system('urls_34.bat')\n\ndef urls_35():\n os.system('urls_35.bat')\n\ndef urls_36():\n os.system('urls_36.bat')\n\ndef urls_37():\n os.system('urls_37.bat')\n\ndef urls_38():\n os.system('urls_38.bat')\n\ndef urls_38():\n os.system('urls_39.bat')\n\ndef urls_39():\n os.system('urls_40.bat')\n\ndef urls_40():\n os.system('urls_41.bat')\n\ndef urls_41():\n os.system('urls_41.bat')\n\ndef urls_42():\n os.system('urls_42.bat')\n\ndef urls_43():\n os.system('urls_43.bat')\n\ndef urls_44():\n os.system('urls_44.bat')\n\ndef urls_45():\n os.system('urls_45.bat')\n\ndef urls_46():\n os.system('urls_46.bat')\n\ndef urls_47():\n os.system('urls_47.bat')\n\ndef urls_47():\n os.system('urls_48.bat')\n\ndef urls_48():\n os.system('urls_49.bat')\n\ndef urls_49():\n os.system('urls_50.bat')\n\ndef urls_50():\n os.system('urls_50.bat')\n\ndef urls_51():\n os.system('urls_51.bat')\n\ndef urls_52():\n os.system('urls_52.bat')\n\ndef urls_53():\n os.system('urls_53.bat')\n\ndef urls_54():\n os.system('urls_54.bat')\n\ndef urls_55():\n os.system('urls_55.bat')\n\ndef urls_56():\n os.system('urls_56.bat')\n\ndef urls_56():\n os.system('urls_57.bat')\n\ndef urls_57():\n os.system('urls_58.bat')\n\ndef urls_58():\n os.system('urls_59.bat')\n\ndef urls_59():\n os.system('urls_59.bat')\n\ndef urls_60():\n os.system('urls_60.bat')\n\ndef urls_61():\n os.system('urls_61.bat')\n\ndef urls_62():\n os.system('urls_62.bat')\n\ndef urls_63():\n os.system('urls_63.bat')\n\ndef urls_64():\n os.system('urls_64.bat')\n\ndef urls_65():\n os.system('urls_65.bat')\n\n\n\n\n\nu1 = multiprocessing.Process(target=urls)\nu2 = 
multiprocessing.Process(target=urls_1)\nu3 = multiprocessing.Process(target=urls_2)\nu4 = multiprocessing.Process(target=urls_3)\nu5 = multiprocessing.Process(target=urls_4)\nu6 = multiprocessing.Process(target=urls_5)\nu7 = multiprocessing.Process(target=urls_6)\nu8 = multiprocessing.Process(target=urls_7)\nu9 = multiprocessing.Process(target=urls_8)\nu10 = multiprocessing.Process(target=urls_9)\nu11 = multiprocessing.Process(target=urls_10)\nu12 = multiprocessing.Process(target=urls_11)\nu13 = multiprocessing.Process(target=urls_12)\nu14 = multiprocessing.Process(target=urls_13)\nu15 = multiprocessing.Process(target=urls_14)\nu16 = multiprocessing.Process(target=urls_15)\nu16 = multiprocessing.Process(target=urls_16)\nu17 = multiprocessing.Process(target=urls_17)\nu18 = multiprocessing.Process(target=urls_18)\nu19 = multiprocessing.Process(target=urls_19)\nu20 = multiprocessing.Process(target=urls_20)\nu21 = multiprocessing.Process(target=urls_21)\nu22 = multiprocessing.Process(target=urls_22)\nu23 = multiprocessing.Process(target=urls_23)\nu24 = multiprocessing.Process(target=urls_24)\nu25 = multiprocessing.Process(target=urls_25)\nu26 = multiprocessing.Process(target=urls_26)\nu27 = multiprocessing.Process(target=urls_27)\nu28 = multiprocessing.Process(target=urls_28)\nu29 = multiprocessing.Process(target=urls_29)\nu30 = multiprocessing.Process(target=urls_30)\nu31 = multiprocessing.Process(target=urls_31)\nu32 = multiprocessing.Process(target=urls_32)\nu33 = multiprocessing.Process(target=urls_33)\nu34 = multiprocessing.Process(target=urls_34)\nu35 = multiprocessing.Process(target=urls_35)\nu36 = multiprocessing.Process(target=urls_36)\nu37 = multiprocessing.Process(target=urls_37)\nu38 = multiprocessing.Process(target=urls_38)\nu39 = multiprocessing.Process(target=urls_39)\nu40 = multiprocessing.Process(target=urls_40)\nu41 = multiprocessing.Process(target=urls_41)\nu42 = multiprocessing.Process(target=urls_42)\nu43 = 
multiprocessing.Process(target=urls_43)\nu44 = multiprocessing.Process(target=urls_44)\nu45 = multiprocessing.Process(target=urls_45)\nu46 = multiprocessing.Process(target=urls_46)\nu47 = multiprocessing.Process(target=urls_47)\nu48 = multiprocessing.Process(target=urls_48)\nu49 = multiprocessing.Process(target=urls_49)\nu50 = multiprocessing.Process(target=urls_50)\nu51 = multiprocessing.Process(target=urls_51)\nu52 = multiprocessing.Process(target=urls_52)\nu53 = multiprocessing.Process(target=urls_53)\nu54 = multiprocessing.Process(target=urls_54)\nu55 = multiprocessing.Process(target=urls_55)\nu56 = multiprocessing.Process(target=urls_56)\nu57 = multiprocessing.Process(target=urls_57)\nu58 = multiprocessing.Process(target=urls_58)\nu59 = multiprocessing.Process(target=urls_59)\nu60 = multiprocessing.Process(target=urls_60)\nu61 = multiprocessing.Process(target=urls_61)\nu62 = multiprocessing.Process(target=urls_62)\nu63 = multiprocessing.Process(target=urls_63)\nu64 = multiprocessing.Process(target=urls_64)\nu65 = multiprocessing.Process(target=urls_65)\n\n\nif __name__ == '__main__':\n u1.start()\n u2.start()\n u3.start()\n u4.start()\n u5.start()\n u6.start()\n u7.start()\n u8.start()\n u9.start()\n u10.start()\n u11.start()\n u12.start()\n u13.start()\n u14.start()\n u15.start()\n u16.start()\n u17.start()\n u18.start()\n u19.start()\n u20.start()\n u21.start()\n u22.start()\n\n u1.join()\n u2.join()\n u3.join()\n u4.join()\n u5.join()\n u6.join()\n u7.join()\n u8.join()\n u9.join()\n u10.join()\n u11.join()\n u12.join()\n u13.join()\n u14.join()\n u15.join()\n u16.join()\n u17.join()\n u18.join()\n u19.join()\n u20.join()\n u21.join()\n u22.join()\n\n os.system('1_runmebaby_3.bat')\n\n'''\nif __name__ == '__main__':\n u23.start()\n u24.start()\n u25.start()\n u26.start()\n u27.start()\n u28.start()\n u29.start()\n u30.start()\n u31.start()\n u32.start()\n u33.start()\n u34.start()\n u35.start()\n u36.start()\n u37.start()\n u38.start()\n u39.start()\n 
u40.start()\n u41.start()\n u42.start()\n u43.start()\n u44.start()\n\n u23.join()\n u24.join()\n u25.join()\n u26.join()\n u27.join()\n u28.join()\n u29.join()\n u30.join()\n u31.join()\n u32.join()\n u33.join()\n u34.join()\n u35.join()\n u36.join()\n u37.join()\n u38.join()\n u39.join()\n u40.join()\n u41.join()\n u42.join()\n u43.join()\n u44.join()\n\n\n\nif __name__ == '__main__':\n u45.start()\n u46.start()\n u47.start()\n u48.start()\n u49.start()\n u50.start()\n u51.start()\n u52.start()\n u53.start()\n u54.start()\n u55.start()\n u56.start()\n u57.start()\n u58.start()\n u59.start()\n u60.start()\n u61.start()\n u62.start()\n u63.start()\n u64.start()\n u65.start()\n\n u45.join()\n u46.join()\n u47.join()\n u48.join()\n u49.join()\n u50.join()\n u51.join()\n u52.join()\n u53.join()\n u54.join()\n u55.join()\n u56.join()\n u57.join()\n u58.join()\n u59.join()\n u60.join()\n u61.join()\n u62.join()\n u63.join()\n u64.join()\n u65.join()\n\n # finish = time.perf_counter()\n # print(f'Successfully finished in {round((finish-start)/60, 2)} minute(s) bro')\n\n'''",
"step-ids": [
39,
53,
59,
67,
75
]
}
|
[
39,
53,
59,
67,
75
] |
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.urls import reverse_lazy
from django.utils import timezone
from time import time
import json
from .models import Attendance, Disciple
from users.models import CustomUser
class AttendanceListView(ListView):
    """List every attendance record, newest first."""

    model = Attendance
    template_name = 'attendance_list.html'

    def get_queryset(self):
        # '-date' sorts descending, so the most recent record leads.
        newest_first = self.model.objects.order_by('-date')
        return newest_first
class AttendanceDetailView(DetailView):
    """Display a single attendance record."""
    model = Attendance
    template_name = 'attendance_detail.html'
"""
class AttendanceCreateView(CreateView):
model = Attendance
template_name = 'attendance_new.html'
fields = ['title', 'document']
def form_valid(self, form):
obj = form.save(commit=False)
obj.author = self.request.user
obj.date = timezone.now()
obj.save()
return super().form_valid(form)
"""
class AttendanceCreateView(CreateView):
    """Create a new attendance record.

    Renders the journal form together with the list of students,
    disciplines and selectable groups, then stamps the saved record
    with the requesting user and the current timestamp.
    """

    model = Attendance
    template_name = 'attendance_new.html'
    fields = ['group', 'disciple']

    # Student groups offered in the form's group selector.
    GROUP_CHOICES = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7', 'ИУ8']

    def get_context_data(self, *args, **kwargs):
        """Add students, disciplines and group choices to the template context."""
        context = super().get_context_data(*args, **kwargs)
        # NOTE(review): the student list is hard-coded to group 'ИУ6' while
        # the form offers every group — confirm whether it should follow the
        # group selected in the form instead.
        context['students'] = CustomUser.objects.filter(student_group='ИУ6')
        context['disciples'] = Disciple.objects.all()
        context['groups'] = self.GROUP_CHOICES
        return context

    def form_valid(self, form):
        """Attach the author and timestamp to the record before saving."""
        obj = form.save(commit=False)
        obj.author = self.request.user
        obj.date = timezone.now()
        # TODO: build the journal CSV from the submitted form data and
        # attach it to obj.document (unfinished in the original; the
        # debug prints and the unused filename were removed).
        obj.save()
        return super().form_valid(form)
|
normal
|
{
"blob_id": "38c78a51a50ee9844aec8b8cdcdd42b858748518",
"index": 2552,
"step-1": "<mask token>\n\n\nclass AttendanceDetailView(DetailView):\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7',\n 'ИУ8']\n context = super(AttendanceCreateView, self).get_context_data(*args,\n **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n form_data = self.request.POST\n print(form_data)\n obj.save()\n print(form, dir(form))\n return super().form_valid(form)\n",
"step-2": "<mask token>\n\n\nclass AttendanceListView(ListView):\n <mask token>\n <mask token>\n\n def get_queryset(self):\n return self.model.objects.order_by('-date')\n\n\nclass AttendanceDetailView(DetailView):\n model = Attendance\n template_name = 'attendance_detail.html'\n\n\n<mask token>\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7',\n 'ИУ8']\n context = super(AttendanceCreateView, self).get_context_data(*args,\n **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n form_data = self.request.POST\n print(form_data)\n obj.save()\n print(form, dir(form))\n return super().form_valid(form)\n",
"step-3": "<mask token>\n\n\nclass AttendanceListView(ListView):\n model = Attendance\n template_name = 'attendance_list.html'\n\n def get_queryset(self):\n return self.model.objects.order_by('-date')\n\n\nclass AttendanceDetailView(DetailView):\n model = Attendance\n template_name = 'attendance_detail.html'\n\n\n<mask token>\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7',\n 'ИУ8']\n context = super(AttendanceCreateView, self).get_context_data(*args,\n **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n form_data = self.request.POST\n print(form_data)\n obj.save()\n print(form, dir(form))\n return super().form_valid(form)\n",
"step-4": "from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom time import time\nimport json\nfrom .models import Attendance, Disciple\nfrom users.models import CustomUser\n\n\nclass AttendanceListView(ListView):\n model = Attendance\n template_name = 'attendance_list.html'\n\n def get_queryset(self):\n return self.model.objects.order_by('-date')\n\n\nclass AttendanceDetailView(DetailView):\n model = Attendance\n template_name = 'attendance_detail.html'\n\n\n<mask token>\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7',\n 'ИУ8']\n context = super(AttendanceCreateView, self).get_context_data(*args,\n **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n form_data = self.request.POST\n print(form_data)\n obj.save()\n print(form, dir(form))\n return super().form_valid(form)\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\n\nfrom time import time\nimport json\n\nfrom .models import Attendance, Disciple\nfrom users.models import CustomUser\n\n\nclass AttendanceListView(ListView):\n model = Attendance\n template_name = 'attendance_list.html'\n\n def get_queryset(self):\n return self.model.objects.order_by('-date')\n\nclass AttendanceDetailView(DetailView):\n model = Attendance\n template_name = 'attendance_detail.html'\n\n\n\"\"\"\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['title', 'document']\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n obj.save()\n\n return super().form_valid(form)\n\"\"\"\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = [\n 'ИУ1',\n 'ИУ2',\n 'ИУ3',\n 'ИУ4',\n 'ИУ5',\n 'ИУ6',\n 'ИУ7',\n 'ИУ8',\n ]\n\n\n context = super(AttendanceCreateView, self).get_context_data(*args, **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n #obj.disciple =\n\n\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n #print(fname)\n form_data = self.request.POST\n print(form_data)\n\n #Process form_data; mk csv_file based on it; save it to obj.document\n\n #obj.document = doc\n\n obj.save()\n print(form, dir(form))\n\n return super().form_valid(form)\n",
"step-ids": [
5,
8,
9,
10,
11
]
}
|
[
5,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=
1, min=0, max=100, reverse=False):
self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=
Pin.IN, pull=pin_mode)
self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=
Pin.IN, pull=pin_mode)
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 15
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
<|reserved_special_token_0|>
def position(self):
return self._pos * self.scale
<|reserved_special_token_0|>
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=
1, min=0, max=100, reverse=False):
self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=
Pin.IN, pull=pin_mode)
self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=
Pin.IN, pull=pin_mode)
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 15
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
def set_callbacks(self, callback=None):
self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
def position(self):
return self._pos * self.scale
def reset(self):
self._pos = 0
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ENC_STATES = 0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=
1, min=0, max=100, reverse=False):
self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=
Pin.IN, pull=pin_mode)
self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=
Pin.IN, pull=pin_mode)
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 15
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
def set_callbacks(self, callback=None):
self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
def position(self):
return self._pos * self.scale
def reset(self):
self._pos = 0
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from machine import Pin
ENC_STATES = 0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=
1, min=0, max=100, reverse=False):
self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=
Pin.IN, pull=pin_mode)
self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=
Pin.IN, pull=pin_mode)
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 15
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
def set_callbacks(self, callback=None):
self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
def position(self):
return self._pos * self.scale
def reset(self):
self._pos = 0
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""MicroPython rotary encoder library."""
from machine import Pin
# Quadrature decode table: indexed by the 4-bit history kept in
# Encoder._callback (previous x/y levels shifted left two bits, ORed with
# the current levels), it yields the signed step -1, 0 or +1 for that
# transition; invalid transitions map to 0.
ENC_STATES = (0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0)
class Encoder(object):
    """Incremental rotary encoder decoded via pin-change interrupts.

    The running count is clamped to ``[min, max]``; ``position()``
    returns the count multiplied by ``scale``.
    """

    def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP,
                 scale=1, min=0, max=100, reverse=False):
        # Accept either ready-made Pin objects or pin names/ids.
        if isinstance(pin_x, Pin):
            self.pin_x = pin_x
        else:
            self.pin_x = Pin(pin_x, mode=Pin.IN, pull=pin_mode)
        if isinstance(pin_y, Pin):
            self.pin_y = pin_y
        else:
            self.pin_y = Pin(pin_y, mode=Pin.IN, pull=pin_mode)

        self.pin_mode = pin_mode
        self.scale = scale
        self.min = min
        self.max = max
        self.reverse = 1 if reverse else -1

        # Interrupt state is allocated up front: the IRQ handler must not
        # allocate memory while running.
        self._pos = -1
        self._readings = 0
        self._state = 0

        self.set_callbacks(self._callback)

    def _callback(self, line):
        # Shift the previous x/y sample left and append the current one,
        # keeping only the low four bits of history.
        combined = (self._readings << 2 |
                    self.pin_x.value() << 1 |
                    self.pin_y.value())
        self._readings = combined & 0x0f
        self._state = ENC_STATES[self._readings] * self.reverse
        if self._state:
            # Clamp the updated position into [min, max].
            self._pos = min(max(self.min, self._pos + self._state), self.max)

    def set_callbacks(self, callback=None):
        # Fire on both edges of both channels.
        edges = Pin.IRQ_FALLING | Pin.IRQ_RISING
        self.irq_x = self.pin_x.callback(trigger=edges, handler=callback)
        self.irq_y = self.pin_y.callback(trigger=edges, handler=callback)

    def position(self):
        """Return the current position scaled by ``scale``."""
        return self._pos * self.scale

    def reset(self):
        """Zero the raw position counter."""
        self._pos = 0

    def setMax(self, Max):
        self.max = Max

    def setMin(self, Min):
        self.min = Min

    def setScale(self, Scale):
        self.scale = Scale
|
flexible
|
{
"blob_id": "1406b2ab78b52823a8f455c8e2719f6bd84bd168",
"index": 822,
"step-1": "<mask token>\n\n\nclass Encoder(object):\n\n def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=\n 1, min=0, max=100, reverse=False):\n self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=\n Pin.IN, pull=pin_mode)\n self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=\n Pin.IN, pull=pin_mode)\n self.pin_mode = pin_mode\n self.scale = scale\n self.min = min\n self.max = max\n self.reverse = 1 if reverse else -1\n self._pos = -1\n self._readings = 0\n self._state = 0\n self.set_callbacks(self._callback)\n\n def _callback(self, line):\n self._readings = (self._readings << 2 | self.pin_x.value() << 1 |\n self.pin_y.value()) & 15\n self._state = ENC_STATES[self._readings] * self.reverse\n if self._state:\n self._pos = min(max(self.min, self._pos + self._state), self.max)\n <mask token>\n\n def position(self):\n return self._pos * self.scale\n <mask token>\n\n def setMax(self, Max):\n self.max = Max\n\n def setMin(self, Min):\n self.min = Min\n\n def setScale(self, Scale):\n self.scale = Scale\n",
"step-2": "<mask token>\n\n\nclass Encoder(object):\n\n def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=\n 1, min=0, max=100, reverse=False):\n self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=\n Pin.IN, pull=pin_mode)\n self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=\n Pin.IN, pull=pin_mode)\n self.pin_mode = pin_mode\n self.scale = scale\n self.min = min\n self.max = max\n self.reverse = 1 if reverse else -1\n self._pos = -1\n self._readings = 0\n self._state = 0\n self.set_callbacks(self._callback)\n\n def _callback(self, line):\n self._readings = (self._readings << 2 | self.pin_x.value() << 1 |\n self.pin_y.value()) & 15\n self._state = ENC_STATES[self._readings] * self.reverse\n if self._state:\n self._pos = min(max(self.min, self._pos + self._state), self.max)\n\n def set_callbacks(self, callback=None):\n self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.\n IRQ_RISING, handler=callback)\n self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.\n IRQ_RISING, handler=callback)\n\n def position(self):\n return self._pos * self.scale\n\n def reset(self):\n self._pos = 0\n\n def setMax(self, Max):\n self.max = Max\n\n def setMin(self, Min):\n self.min = Min\n\n def setScale(self, Scale):\n self.scale = Scale\n",
"step-3": "<mask token>\nENC_STATES = 0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0\n\n\nclass Encoder(object):\n\n def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=\n 1, min=0, max=100, reverse=False):\n self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=\n Pin.IN, pull=pin_mode)\n self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=\n Pin.IN, pull=pin_mode)\n self.pin_mode = pin_mode\n self.scale = scale\n self.min = min\n self.max = max\n self.reverse = 1 if reverse else -1\n self._pos = -1\n self._readings = 0\n self._state = 0\n self.set_callbacks(self._callback)\n\n def _callback(self, line):\n self._readings = (self._readings << 2 | self.pin_x.value() << 1 |\n self.pin_y.value()) & 15\n self._state = ENC_STATES[self._readings] * self.reverse\n if self._state:\n self._pos = min(max(self.min, self._pos + self._state), self.max)\n\n def set_callbacks(self, callback=None):\n self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.\n IRQ_RISING, handler=callback)\n self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.\n IRQ_RISING, handler=callback)\n\n def position(self):\n return self._pos * self.scale\n\n def reset(self):\n self._pos = 0\n\n def setMax(self, Max):\n self.max = Max\n\n def setMin(self, Min):\n self.min = Min\n\n def setScale(self, Scale):\n self.scale = Scale\n",
"step-4": "<mask token>\nfrom machine import Pin\nENC_STATES = 0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0\n\n\nclass Encoder(object):\n\n def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=\n 1, min=0, max=100, reverse=False):\n self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=\n Pin.IN, pull=pin_mode)\n self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=\n Pin.IN, pull=pin_mode)\n self.pin_mode = pin_mode\n self.scale = scale\n self.min = min\n self.max = max\n self.reverse = 1 if reverse else -1\n self._pos = -1\n self._readings = 0\n self._state = 0\n self.set_callbacks(self._callback)\n\n def _callback(self, line):\n self._readings = (self._readings << 2 | self.pin_x.value() << 1 |\n self.pin_y.value()) & 15\n self._state = ENC_STATES[self._readings] * self.reverse\n if self._state:\n self._pos = min(max(self.min, self._pos + self._state), self.max)\n\n def set_callbacks(self, callback=None):\n self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.\n IRQ_RISING, handler=callback)\n self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.\n IRQ_RISING, handler=callback)\n\n def position(self):\n return self._pos * self.scale\n\n def reset(self):\n self._pos = 0\n\n def setMax(self, Max):\n self.max = Max\n\n def setMin(self, Min):\n self.min = Min\n\n def setScale(self, Scale):\n self.scale = Scale\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"MicroPython rotary encoder library.\"\"\"\n\nfrom machine import Pin\n\n\nENC_STATES = (0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0)\n\n\nclass Encoder(object):\n def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP,\n scale=1, min=0, max=100, reverse=False):\n self.pin_x = (pin_x if isinstance(pin_x, Pin) else\n Pin(pin_x, mode=Pin.IN, pull=pin_mode))\n self.pin_y = (pin_y if isinstance(pin_y, Pin) else\n Pin(pin_y, mode=Pin.IN, pull=pin_mode))\n\n self.pin_mode = pin_mode\n self.scale = scale\n self.min = min\n self.max = max\n self.reverse = 1 if reverse else -1\n\n # The following variables are assigned to in the interrupt callback,\n # so we have to allocate them here.\n self._pos = -1\n self._readings = 0\n self._state = 0\n\n self.set_callbacks(self._callback)\n\n def _callback(self, line):\n self._readings = (self._readings << 2 | self.pin_x.value() << 1 |\n self.pin_y.value()) & 0x0f\n\n self._state = ENC_STATES[self._readings] * self.reverse\n if self._state:\n self._pos = min(max(self.min, self._pos + self._state), self.max)\n\n def set_callbacks(self, callback=None):\n self.irq_x = self.pin_x.callback(\n trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING, handler=callback)\n self.irq_y = self.pin_y.callback(\n trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING, handler=callback)\n\n def position(self):\n return self._pos * self.scale\n\n def reset(self):\n self._pos = 0\n\n def setMax(self, Max):\n self.max = Max\n\n def setMin(self, Min):\n self.min = Min\n\n def setScale(self, Scale):\n self.scale = Scale\n",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(now.year, now.month, now.day, now.hour, now.minute, now.second)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
now = datetime.datetime.now()
print(now.year, now.month, now.day, now.hour, now.minute, now.second)
<|reserved_special_token_1|>
import datetime

# Capture the current local time once so every printed field is consistent.
now = datetime.datetime.now()
# Emit the six date/time components, space-separated on a single line.
print(*(now.year, now.month, now.day, now.hour, now.minute, now.second))
|
flexible
|
{
"blob_id": "3af91de0b25f575ec9d981d7711c710a7e9695e4",
"index": 6819,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(now.year, now.month, now.day, now.hour, now.minute, now.second)\n",
"step-3": "<mask token>\nnow = datetime.datetime.now()\nprint(now.year, now.month, now.day, now.hour, now.minute, now.second)\n",
"step-4": "import datetime\nnow = datetime.datetime.now()\nprint(now.year, now.month, now.day, now.hour, now.minute, now.second)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import nltk
import A
from collections import defaultdict
from nltk.align import Alignment, AlignedSent
class BerkeleyAligner():
    """Bidirectional IBM-Model-2-style word aligner (Berkeley-aligner idea).

    EM-trains translation probabilities ``t`` and distortion probabilities
    ``q`` on nltk ``AlignedSent`` pairs, averaging the fractional counts of
    the two translation directions (``words`` -> ``mots`` and back) in each
    E step.  By convention here ``words`` is German and ``mots`` is English.
    Written for Python 2 (uses print statements).
    """

    def __init__(self, align_sents, num_iter):
        # Train immediately; align() only reads self.t / self.q afterwards.
        self.t, self.q = self.train(align_sents, num_iter)

    def align(self, align_sent):
        """Return an AlignedSent mapping each position of ``.words`` to its
        most probable position in ``.mots`` (or None for the NULL word)."""
        alignments = []
        german = align_sent.words
        english = align_sent.mots
        len_g = len(german)
        len_e = len(english)

        for j in range(len_g):
            g = german[j]
            # Baseline candidate: align g to NULL.  Candidates are
            # (probability, index) tuples, so max() compares probability
            # first.  NOTE(review): self.t / self.q are defaultdicts, so
            # unseen keys score 0.0 (and are inserted as a side effect).
            best_prob = (self.t[(g,None)] * self.q[(0,j,len_e,len_g)], None)
            best_alignment_point = None  # NOTE(review): assigned, never used
            for i in range(len_e):
                e = english[i]
                # Score the pair under both directions and keep the best
                # candidate seen so far (including the NULL baseline).
                ge_prob = (self.t[(e,g)]*self.q[(j,i,len_g,len_e)], i)
                eg_prob = (self.t[(g,e)]*self.q[(i,j,len_e,len_g)], i)
                best_prob = max(best_prob, ge_prob, eg_prob)

            alignments.append((j, best_prob[1]))

        return AlignedSent(align_sent.words, align_sent.mots, alignments)

    def train(self, aligned_sents, num_iters):
        """Run ``num_iters`` EM iterations; return the ``(t, q)`` tables.

        ``t`` is keyed by ordered word pairs (both directions share one
        dict); ``q`` by (position, position, length, length) tuples.
        NOTE(review): with num_iters == 0 this raises NameError (``q`` is
        only bound inside the iteration loop).
        """
        MIN_PROB = 1.0e-12  # NOTE(review): defined but never used below
        # --- Initialization ------------------------------------------------
        # Vocabularies for each language: german = .words, english = .mots
        g_vocab = set()
        e_vocab = set()
        for sentence in aligned_sents:
            g_vocab.update(sentence.words)
            e_vocab.update(sentence.mots)

        # Uniform translation table for english->german and german->english
        # (one shared dict, keyed by ordered pairs).
        t = defaultdict(float)
        for g in g_vocab:
            for e in e_vocab:
                t[(g,e)] = 1.0 / float(len(g_vocab))
                t[(e,g)] = 1.0 / float(len(e_vocab))

        # Separate uniform distortion tables per direction.
        q_eg = defaultdict(float)
        q_ge = defaultdict(float)
        for sentence in aligned_sents:
            len_e=len(sentence.mots)
            len_g=len(sentence.words)
            for i in range(len_e):
                for j in range(len_g):
                    q_eg[(i,j,len_e,len_g)] = 1.0 / float((len_e+1))
                    q_ge[(j,i,len_g,len_e)] = 1.0 / float((len_g+1))

        print 'Initialization complete'
        # --- EM iterations -------------------------------------------------
        # NOTE(review): every E step reads q_eg/q_ge, which keep their
        # uniform initialization forever -- the M step writes its distortion
        # estimates into a *separate* dict ``q`` that is never fed back.
        # Verify whether q_eg/q_ge were meant to be refreshed per iteration.
        for i in range(num_iters):
            print 'Iteration ' + str(i+1) + ' /' + str(num_iters)
            # E step accumulators: expected (fractional) counts, averaged
            # over the two translation directions.
            count_g_given_e = defaultdict(float)
            count_any_g_given_e = defaultdict(float)
            eg_alignment_count = defaultdict(float)
            eg_alignment_count_for_any_i = defaultdict(float)
            count_e_given_g = defaultdict(float)
            count_any_e_given_g = defaultdict(float)
            ge_alignment_count = defaultdict(float)
            ge_alignment_count_for_any_j = defaultdict(float)

            for sentence in aligned_sents:
                g_sentence = sentence.words
                e_sentence = sentence.mots
                len_e = len(sentence.mots)
                len_g = len(sentence.words)
                eg_total = defaultdict(float)
                ge_total = defaultdict(float)

                # E step (a): per-word normalization constants.
                for j in range(len_g):
                    g = g_sentence[j]
                    for i in range(len_e):
                        e = e_sentence[i]
                        eg_count = (t[(g_sentence[j],e_sentence[i])] * q_eg[(i,j,len_e,len_g)])
                        eg_total[g] += eg_count
                        ge_count = (t[(e_sentence[i], g_sentence[j])] * q_ge[(j,i,len_g,len_e)])
                        ge_total[e] += ge_count

                # E step (b): collect fractional counts, averaging the two
                # directions' posteriors for each (g, e) pair.
                for j in range(len_g):
                    g = g_sentence[j]
                    for i in range(len_e):
                        e = e_sentence[i]
                        # English --> German posterior
                        eg_count = (t[(g_sentence[j],e_sentence[i])] * q_eg[(i,j,len_e,len_g)])
                        eg_normalized = eg_count / eg_total[g]
                        # German --> English posterior
                        ge_count = (t[(e_sentence[i], g_sentence[j])] * q_ge[(j,i,len_g,len_e)])
                        ge_normalized = ge_count / ge_total[e]
                        # Average the two directions' probabilities.
                        avg_normalized = (eg_normalized + ge_normalized) / 2.0
                        # Accumulate translation and distortion counts.
                        count_g_given_e[(g,e)] += avg_normalized
                        count_any_g_given_e[e] += avg_normalized
                        eg_alignment_count[(i,j,len_e,len_g)] += avg_normalized
                        eg_alignment_count_for_any_i[(j,len_e,len_g)] += avg_normalized
                        count_e_given_g[(e,g)] += avg_normalized
                        count_any_e_given_g[g] += avg_normalized
                        ge_alignment_count[(j,i,len_g,len_e)] += avg_normalized
                        ge_alignment_count_for_any_j[(i,len_g,len_e)] += avg_normalized

            # M step: re-estimate t in place and build this iteration's q.
            q = defaultdict(float)
            for sentence in aligned_sents:
                for e in sentence.mots:
                    for g in sentence.words:
                        # eng --> germ
                        t[(g,e)]= count_g_given_e[(g,e)] / count_any_g_given_e[e]
                        # germ --> eng
                        t[(e,g)]= count_e_given_g[(e,g)] / count_any_e_given_g[g]

                len_e=len(sentence.mots)
                len_g=len(sentence.words)
                for i in range(len_e):
                    for j in range(len_g):
                        # NOTE(review): both directions share dict ``q``;
                        # when len_e == len_g the two key schemes can refer
                        # to the same tuple for different word pairs and
                        # overwrite each other -- confirm intended.
                        # eng --> germ
                        q[(i,j,len_e,len_g)] = eg_alignment_count[(i,j,len_e,len_g)] / eg_alignment_count_for_any_i[(j,len_e, len_g)]
                        # germ --> eng
                        q[(j,i,len_g,len_e)] = ge_alignment_count[(j,i,len_g,len_e)] / ge_alignment_count_for_any_j[(i,len_g,len_e)]
        # Only the final iteration's q survives (see NOTE above).
        return (t,q)
def main(aligned_sents):
    """Train a Berkeley aligner on *aligned_sents*, dump its alignments,
    and report the average alignment error rate over 50 sentences."""
    aligner = BerkeleyAligner(aligned_sents, 10)
    A.save_model_output(aligned_sents, aligner, "ba.txt")
    avg_aer = A.compute_avg_aer(aligned_sents, aligner, 50)

    print ('Berkeley Aligner')
    print ('---------------------------')
    print('Average AER: {0:.3f}\n'.format(avg_aer))
|
normal
|
{
"blob_id": "bf40b516e202af14469cd4012597ba412e663f56",
"index": 5898,
"step-1": "import nltk\nimport A\nfrom collections import defaultdict\nfrom nltk.align import Alignment, AlignedSent\n\nclass BerkeleyAligner():\n\n def __init__(self, align_sents, num_iter):\n\tself.t, self.q = self.train(align_sents, num_iter)\n\t\n # TODO: Computes the alignments for align_sent, using this model's parameters. Return\n # an AlignedSent object, with the sentence pair and the alignments computed.\n def align(self, align_sent):\n#\t#will return german --> english alignments\n \talignments = []\n german = align_sent.words\n english = align_sent.mots\n len_g = len(german)\n len_e = len(english)\n\n for j in range(len_g):\n\t\tg = german[j]\n\t\tbest_prob = (self.t[(g,None)] * self.q[(0,j,len_e,len_g)], None)\n\t\tbest_alignment_point = None\n\t\tfor i in range(len_e):\n \te = english[i]\n \t \t\tge_prob = (self.t[(e,g)]*self.q[(j,i,len_g,len_e)], i)\n\t\t\teg_prob = (self.t[(g,e)]*self.q[(i,j,len_e,len_g)], i)\n\t\t\tbest_prob = max(best_prob, ge_prob, eg_prob)\n\t\t\t\n\t\talignments.append((j, best_prob[1]))\n\n\treturn AlignedSent(align_sent.words, align_sent.mots, alignments)\n\n \n # TODO: Implement the EM algorithm. num_iters is the number of iterations. 
Returns the \n # translation and distortion parameters as a tuple.\n def train(self, aligned_sents, num_iters):\n\tMIN_PROB = 1.0e-12\n\t#INITIALIZATION\n\t#defining the vocabulary for each language:\n\t#german = words\n\t#english = mots\n\tg_vocab = set()\n\te_vocab = set()\n\tfor sentence in aligned_sents:\n\t\tg_vocab.update(sentence.words)\n\t\te_vocab.update(sentence.mots)\n\n\t# initializing translation table for english --> german and german --> english\n\tt = defaultdict(float)\n\tfor g in g_vocab:\n\t\tfor e in e_vocab:\n\t\t\tt[(g,e)] = 1.0 / float(len(g_vocab))\n\t\t\tt[(e,g)] = 1.0 / float(len(e_vocab))\n\t\n\t# initializing separate alignment tables for english --> german and german --> english\n\tq_eg = defaultdict(float)\n\tq_ge = defaultdict(float)\n\tfor sentence in aligned_sents:\n\t\tlen_e=len(sentence.mots)\n\t\tlen_g=len(sentence.words)\n\t\tfor i in range(len_e):\n\t\t\tfor j in range(len_g):\n\t\t\t\tq_eg[(i,j,len_e,len_g)] = 1.0 / float((len_e+1))\n\t\t\t\tq_ge[(j,i,len_g,len_e)] = 1.0 / float((len_g+1))\n\n\tprint 'Initialization complete'\n\t#INITIALIZATION COMPLETE\n\n\tfor i in range(num_iters):\n\t\tprint 'Iteration ' + str(i+1) + ' /' + str(num_iters)\n\t\t#E step\n\t\tcount_g_given_e = defaultdict(float)\n\t\tcount_any_g_given_e = defaultdict(float)\n\t\teg_alignment_count = defaultdict(float)\n\t\teg_alignment_count_for_any_i = defaultdict(float)\n\t\tcount_e_given_g = defaultdict(float)\n\t\tcount_any_e_given_g = defaultdict(float)\n\t\tge_alignment_count = defaultdict(float)\n\t\tge_alignment_count_for_any_j = defaultdict(float)\n\t\t\n\t\tfor sentence in aligned_sents:\n\t\t\tg_sentence = sentence.words\n\t\t\te_sentence = sentence.mots\n\t\t\tlen_e = len(sentence.mots)\n\t\t\tlen_g = len(sentence.words)\n\t\t\teg_total = defaultdict(float)\n\t\t\tge_total = defaultdict(float)\n\n\t\t\t#E step (a): compute normalization\n\t\t\tfor j in range(len_g):\n\t\t\t\t\n\t\t\t\tg = g_sentence[j]\n\t\n\t\t\t\tfor i in 
range(len_e):\n\t\t\t\t\t\n\t\t\t\t\te = e_sentence[i]\n\n\t\t\t\t\teg_count = (t[(g_sentence[j],e_sentence[i])] * q_eg[(i,j,len_e,len_g)])\n\t\t\t\t\teg_total[g] += eg_count\n\n\t\t\t\t\tge_count = (t[(e_sentence[i], g_sentence[j])] * q_ge[(j,i,len_g,len_e)])\n\t\t\t\t\tge_total[e] += ge_count \n\n\t\t\t# E step (b): collect fractional counts\n\t\t\tfor j in range(len_g):\n\t\t\t\t\n\t\t\t\tg = g_sentence[j]\n\n\t\t\t\tfor i in range(len_e):\n\t\t\t\t\t\n\t\t\t\t\te = e_sentence[i]\n\t\n\t\t\t\t\t#English --> German\n\t\t\t\t\teg_count = (t[(g_sentence[j],e_sentence[i])] * q_eg[(i,j,len_e,len_g)])\n\t\t\t\t\teg_normalized = eg_count / eg_total[g]\n\n\t\t\t\t\t#German --> English\n\t\t\t\t\tge_count = (t[(e_sentence[i], g_sentence[j])] * q_ge[(j,i,len_g,len_e)])\n\t\t\t\t\tge_normalized = ge_count / ge_total[e]\n\n\t\t\t\t\t#Averaging the probablities\n\t\t\t\t\tavg_normalized = (eg_normalized + ge_normalized) / 2.0\n\t\t\t\t\t#Storing counts\n\t\t\t\t\tcount_g_given_e[(g,e)] += avg_normalized\n\t\t\t\t\tcount_any_g_given_e[e] += avg_normalized\n\t\t\t\t\teg_alignment_count[(i,j,len_e,len_g)] += avg_normalized\n\t\t\t\t\teg_alignment_count_for_any_i[(j,len_e,len_g)] += avg_normalized\n\t\t\t\t\tcount_e_given_g[(e,g)] += avg_normalized\n\t\t\t\t\tcount_any_e_given_g[g] += avg_normalized\n\t\t\t\t\tge_alignment_count[(j,i,len_g,len_e)] += avg_normalized\n\t\t\t\t\tge_alignment_count_for_any_j[(i,len_g,len_e)] += avg_normalized\n\n\t\t#M step\n\t\tq = defaultdict(float)\n\t\tfor sentence in aligned_sents:\n\t\t\tfor e in sentence.mots:\n\t\t\t\tfor g in sentence.words:\n\t\t\t\t\t#eng --> germ\n\t\t\t\t\tt[(g,e)]= count_g_given_e[(g,e)] / count_any_g_given_e[e]\n\t\t\t\t\t#germ --> eng\n\t\t\t\t\tt[(e,g)]= count_e_given_g[(e,g)] / count_any_e_given_g[g]\n\n\t\t\tlen_e=len(sentence.mots)\n\t\t\tlen_g=len(sentence.words)\n\t\t\tfor i in range(len_e):\n\t\t\t\tfor j in range(len_g):\n\t\t\t\t\t#eng --> germ\n\t\t\t\t\tq[(i,j,len_e,len_g)] = 
eg_alignment_count[(i,j,len_e,len_g)] / eg_alignment_count_for_any_i[(j,len_e, len_g)]\n\t\t\t\t\t#germ --> eng\n\t\t\t\t\tq[(j,i,len_g,len_e)] = ge_alignment_count[(j,i,len_g,len_e)] / ge_alignment_count_for_any_j[(i,len_g,len_e)]\n\treturn (t,q)\n\n\ndef main(aligned_sents):\n ba = BerkeleyAligner(aligned_sents, 10)\n A.save_model_output(aligned_sents, ba, \"ba.txt\")\n avg_aer = A.compute_avg_aer(aligned_sents, ba, 50)\n\n print ('Berkeley Aligner')\n print ('---------------------------')\n print('Average AER: {0:.3f}\\n'.format(avg_aer))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
@clockit
def analyze_cover(benchmarks, result_dir, calc_f1, append):
if not append:
print_headers(result_dir)
for benchmark in benchmarks:
count_benchmark_cover(result_dir, calc_f1, benchmark)
<|reserved_special_token_0|>
def f1_score(community, ground_truth):
max_f1 = 0.0
for gt_comm in ground_truth.values():
overlap = len(gt_comm.intersection(community))
if overlap == 0:
continue
precision = overlap / len(community)
recall = overlap / len(gt_comm)
f1 = 2 * precision * recall / (precision + recall)
max_f1 = max(max_f1, f1)
return max_f1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@clockit
def analyze_cover(benchmarks, result_dir, calc_f1, append):
if not append:
print_headers(result_dir)
for benchmark in benchmarks:
count_benchmark_cover(result_dir, calc_f1, benchmark)
<|reserved_special_token_0|>
def get_communities(graph, cover):
comm_map = defaultdict(lambda : set())
for u in graph.nodes():
comms = cover.subsetsOf(u)
for c in comms:
comm_map[c].add(u)
return comm_map
def f1_score(community, ground_truth):
max_f1 = 0.0
for gt_comm in ground_truth.values():
overlap = len(gt_comm.intersection(community))
if overlap == 0:
continue
precision = overlap / len(community)
recall = overlap / len(gt_comm)
f1 = 2 * precision * recall / (precision + recall)
max_f1 = max(max_f1, f1)
return max_f1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@clockit
def analyze_cover(benchmarks, result_dir, calc_f1, append):
if not append:
print_headers(result_dir)
for benchmark in benchmarks:
count_benchmark_cover(result_dir, calc_f1, benchmark)
def print_headers(result_dir):
with open(result_dir + 'cover_num_comms.result', 'w') as f:
f.write(create_line(*CoverBenchmark.output_header(),
'Number of Communities'))
with open(result_dir + 'cover_comm_sizes.result', 'w') as f:
f.write(create_line(*CoverBenchmark.output_header(),
'Community Size', 'F1 Score'))
with open(result_dir + 'cover_node_comms.result', 'w') as f:
f.write(create_line(*CoverBenchmark.output_header(),
'Number of Communities per Node'))
def count_benchmark_cover(result_dir, calc_f1, benchmark):
cover = benchmark.get_cover()
ground_truth = benchmark.get_ground_truth()
comm_map = get_communities(benchmark.get_graph(), cover)
gt_map = get_communities(benchmark.get_graph(), ground_truth)
comm_sizes = cover.subsetSizeMap()
with open(result_dir + 'cover_num_comms.result', 'a') as f:
f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))
with open(result_dir + 'cover_comm_sizes.result', 'a') as f:
for u in cover.getSubsetIds():
comm = comm_map[u]
size = comm_sizes[u]
f1 = f1_score(comm, gt_map) if calc_f1 else 0
f.write(create_line(*benchmark.output_line(), log2(size), f1))
with open(result_dir + 'cover_node_comms.result', 'a') as f:
for u in benchmark.get_graph().nodes():
num_comms = len(cover.subsetsOf(u))
if num_comms > 0:
f.write(create_line(*benchmark.output_line(), log2(num_comms)))
def get_communities(graph, cover):
comm_map = defaultdict(lambda : set())
for u in graph.nodes():
comms = cover.subsetsOf(u)
for c in comms:
comm_map[c].add(u)
return comm_map
def f1_score(community, ground_truth):
max_f1 = 0.0
for gt_comm in ground_truth.values():
overlap = len(gt_comm.intersection(community))
if overlap == 0:
continue
precision = overlap / len(community)
recall = overlap / len(gt_comm)
f1 = 2 * precision * recall / (precision + recall)
max_f1 = max(max_f1, f1)
return max_f1
<|reserved_special_token_1|>
from math import log2
from egosplit.benchmarks.data_structures.cover_benchmark import *
from egosplit.benchmarks.evaluation.utility import create_line
from networkit.stopwatch import clockit
@clockit
def analyze_cover(benchmarks, result_dir, calc_f1, append):
    """Evaluate the result cover of every benchmark run.

    Starts fresh output files unless *append* is truthy, then appends one
    set of statistics per benchmark.
    """
    if not append:
        print_headers(result_dir)

    for bench in benchmarks:
        count_benchmark_cover(result_dir, calc_f1, bench)
def print_headers(result_dir):
    """(Re)create the three cover result files with their header rows."""
    extra_columns = (
        ('cover_num_comms.result', ('Number of Communities',)),
        ('cover_comm_sizes.result', ('Community Size', 'F1 Score')),
        ('cover_node_comms.result', ('Number of Communities per Node',)),
    )
    for file_name, columns in extra_columns:
        with open(result_dir + file_name, 'w') as f:
            f.write(create_line(*CoverBenchmark.output_header(), *columns))
def count_benchmark_cover(result_dir, calc_f1, benchmark):
    """Append one benchmark's cover statistics to the three result files."""
    graph = benchmark.get_graph()
    cover = benchmark.get_cover()
    detected = get_communities(graph, cover)
    reference = get_communities(graph, benchmark.get_ground_truth())
    sizes = cover.subsetSizeMap()

    # Total number of detected communities.
    with open(result_dir + 'cover_num_comms.result', 'a') as f:
        f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))

    # Per-community size (log2) and best-match F1 against the ground truth.
    with open(result_dir + 'cover_comm_sizes.result', 'a') as f:
        for comm_id in cover.getSubsetIds():
            score = f1_score(detected[comm_id], reference) if calc_f1 else 0
            f.write(create_line(*benchmark.output_line(),
                                log2(sizes[comm_id]), score))

    # Per-node membership count (log2), skipping unassigned nodes.
    with open(result_dir + 'cover_node_comms.result', 'a') as f:
        for node in graph.nodes():
            memberships = len(cover.subsetsOf(node))
            if memberships > 0:
                f.write(create_line(*benchmark.output_line(), log2(memberships)))
def get_communities(graph, cover):
    """Map each community id in *cover* to the set of its member nodes."""
    membership = defaultdict(set)
    for node in graph.nodes():
        for comm_id in cover.subsetsOf(node):
            membership[comm_id].add(node)
    return membership
def f1_score(community, ground_truth):
    """Return the best F1 between *community* and any ground-truth community.

    *ground_truth* maps community ids to node sets; communities with no
    overlap contribute nothing, and an empty mapping yields 0.0.
    """
    scores = [0.0]
    for members in ground_truth.values():
        hits = len(members.intersection(community))
        if hits:
            precision = hits / len(community)
            recall = hits / len(members)
            scores.append(2 * precision * recall / (precision + recall))
    return max(scores)
<|reserved_special_token_1|>
from math import log2
from egosplit.benchmarks.data_structures.cover_benchmark import *
from egosplit.benchmarks.evaluation.utility import create_line
from networkit.stopwatch import clockit
# Analyse the result cover of a benchmark run
@clockit
def analyze_cover(benchmarks, result_dir, calc_f1, append):
if not append:
print_headers(result_dir)
for benchmark in benchmarks:
count_benchmark_cover(result_dir, calc_f1, benchmark)
# Print output file headers
def print_headers(result_dir):
with open(result_dir + 'cover_num_comms.result', 'w') as f:
f.write(create_line(*CoverBenchmark.output_header(), 'Number of Communities'))
with open(result_dir + 'cover_comm_sizes.result', 'w') as f:
f.write(create_line(*CoverBenchmark.output_header(), 'Community Size', 'F1 Score'))
with open(result_dir + 'cover_node_comms.result', 'w') as f:
f.write(create_line(*CoverBenchmark.output_header(), 'Number of Communities per Node'))
# Count the number of communities and their sizes
def count_benchmark_cover(result_dir, calc_f1, benchmark):
cover = benchmark.get_cover()
ground_truth = benchmark.get_ground_truth()
comm_map = get_communities(benchmark.get_graph(), cover)
gt_map = get_communities(benchmark.get_graph(), ground_truth)
comm_sizes = cover.subsetSizeMap()
# Number of communities
with open(result_dir + 'cover_num_comms.result', 'a') as f:
f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))
# Community sizes and F1 scores
with open(result_dir + 'cover_comm_sizes.result', 'a') as f:
for u in cover.getSubsetIds():
comm = comm_map[u]
size = comm_sizes[u]
f1 = f1_score(comm, gt_map) if calc_f1 else 0
f.write(create_line(*benchmark.output_line(), log2(size), f1))
# Number of Communities per Node
with open(result_dir + 'cover_node_comms.result', 'a') as f:
for u in benchmark.get_graph().nodes():
num_comms = len(cover.subsetsOf(u))
if num_comms > 0:
f.write(create_line(*benchmark.output_line(), log2(num_comms)))
def get_communities(graph, cover):
comm_map = defaultdict(lambda: set())
for u in graph.nodes():
comms = cover.subsetsOf(u)
for c in comms:
comm_map[c].add(u)
return comm_map
def f1_score(community, ground_truth):
max_f1 = 0.0
for gt_comm in ground_truth.values():
overlap = len(gt_comm.intersection(community))
if overlap == 0:
continue
precision = overlap / len(community)
recall = overlap / len(gt_comm)
f1 = 2 * precision * recall / (precision + recall)
max_f1 = max(max_f1, f1)
return max_f1
|
flexible
|
{
"blob_id": "dc5b9600828857cc5ea434a7b010cd8aa2589d22",
"index": 6568,
"step-1": "<mask token>\n\n\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n if not append:\n print_headers(result_dir)\n for benchmark in benchmarks:\n count_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\n<mask token>\n\n\ndef f1_score(community, ground_truth):\n max_f1 = 0.0\n for gt_comm in ground_truth.values():\n overlap = len(gt_comm.intersection(community))\n if overlap == 0:\n continue\n precision = overlap / len(community)\n recall = overlap / len(gt_comm)\n f1 = 2 * precision * recall / (precision + recall)\n max_f1 = max(max_f1, f1)\n return max_f1\n",
"step-2": "<mask token>\n\n\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n if not append:\n print_headers(result_dir)\n for benchmark in benchmarks:\n count_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\n<mask token>\n\n\ndef get_communities(graph, cover):\n comm_map = defaultdict(lambda : set())\n for u in graph.nodes():\n comms = cover.subsetsOf(u)\n for c in comms:\n comm_map[c].add(u)\n return comm_map\n\n\ndef f1_score(community, ground_truth):\n max_f1 = 0.0\n for gt_comm in ground_truth.values():\n overlap = len(gt_comm.intersection(community))\n if overlap == 0:\n continue\n precision = overlap / len(community)\n recall = overlap / len(gt_comm)\n f1 = 2 * precision * recall / (precision + recall)\n max_f1 = max(max_f1, f1)\n return max_f1\n",
"step-3": "<mask token>\n\n\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n if not append:\n print_headers(result_dir)\n for benchmark in benchmarks:\n count_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\ndef print_headers(result_dir):\n with open(result_dir + 'cover_num_comms.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Number of Communities'))\n with open(result_dir + 'cover_comm_sizes.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Community Size', 'F1 Score'))\n with open(result_dir + 'cover_node_comms.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Number of Communities per Node'))\n\n\ndef count_benchmark_cover(result_dir, calc_f1, benchmark):\n cover = benchmark.get_cover()\n ground_truth = benchmark.get_ground_truth()\n comm_map = get_communities(benchmark.get_graph(), cover)\n gt_map = get_communities(benchmark.get_graph(), ground_truth)\n comm_sizes = cover.subsetSizeMap()\n with open(result_dir + 'cover_num_comms.result', 'a') as f:\n f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))\n with open(result_dir + 'cover_comm_sizes.result', 'a') as f:\n for u in cover.getSubsetIds():\n comm = comm_map[u]\n size = comm_sizes[u]\n f1 = f1_score(comm, gt_map) if calc_f1 else 0\n f.write(create_line(*benchmark.output_line(), log2(size), f1))\n with open(result_dir + 'cover_node_comms.result', 'a') as f:\n for u in benchmark.get_graph().nodes():\n num_comms = len(cover.subsetsOf(u))\n if num_comms > 0:\n f.write(create_line(*benchmark.output_line(), log2(num_comms)))\n\n\ndef get_communities(graph, cover):\n comm_map = defaultdict(lambda : set())\n for u in graph.nodes():\n comms = cover.subsetsOf(u)\n for c in comms:\n comm_map[c].add(u)\n return comm_map\n\n\ndef f1_score(community, ground_truth):\n max_f1 = 0.0\n for gt_comm in ground_truth.values():\n overlap = len(gt_comm.intersection(community))\n if overlap == 0:\n 
continue\n precision = overlap / len(community)\n recall = overlap / len(gt_comm)\n f1 = 2 * precision * recall / (precision + recall)\n max_f1 = max(max_f1, f1)\n return max_f1\n",
"step-4": "from math import log2\nfrom egosplit.benchmarks.data_structures.cover_benchmark import *\nfrom egosplit.benchmarks.evaluation.utility import create_line\nfrom networkit.stopwatch import clockit\n\n\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n if not append:\n print_headers(result_dir)\n for benchmark in benchmarks:\n count_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\ndef print_headers(result_dir):\n with open(result_dir + 'cover_num_comms.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Number of Communities'))\n with open(result_dir + 'cover_comm_sizes.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Community Size', 'F1 Score'))\n with open(result_dir + 'cover_node_comms.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Number of Communities per Node'))\n\n\ndef count_benchmark_cover(result_dir, calc_f1, benchmark):\n cover = benchmark.get_cover()\n ground_truth = benchmark.get_ground_truth()\n comm_map = get_communities(benchmark.get_graph(), cover)\n gt_map = get_communities(benchmark.get_graph(), ground_truth)\n comm_sizes = cover.subsetSizeMap()\n with open(result_dir + 'cover_num_comms.result', 'a') as f:\n f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))\n with open(result_dir + 'cover_comm_sizes.result', 'a') as f:\n for u in cover.getSubsetIds():\n comm = comm_map[u]\n size = comm_sizes[u]\n f1 = f1_score(comm, gt_map) if calc_f1 else 0\n f.write(create_line(*benchmark.output_line(), log2(size), f1))\n with open(result_dir + 'cover_node_comms.result', 'a') as f:\n for u in benchmark.get_graph().nodes():\n num_comms = len(cover.subsetsOf(u))\n if num_comms > 0:\n f.write(create_line(*benchmark.output_line(), log2(num_comms)))\n\n\ndef get_communities(graph, cover):\n comm_map = defaultdict(lambda : set())\n for u in graph.nodes():\n comms = cover.subsetsOf(u)\n for c in comms:\n comm_map[c].add(u)\n 
return comm_map\n\n\ndef f1_score(community, ground_truth):\n max_f1 = 0.0\n for gt_comm in ground_truth.values():\n overlap = len(gt_comm.intersection(community))\n if overlap == 0:\n continue\n precision = overlap / len(community)\n recall = overlap / len(gt_comm)\n f1 = 2 * precision * recall / (precision + recall)\n max_f1 = max(max_f1, f1)\n return max_f1\n",
"step-5": "from math import log2\n\nfrom egosplit.benchmarks.data_structures.cover_benchmark import *\nfrom egosplit.benchmarks.evaluation.utility import create_line\nfrom networkit.stopwatch import clockit\n\n\n# Analyse the result cover of a benchmark run\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n\tif not append:\n\t\tprint_headers(result_dir)\n\n\tfor benchmark in benchmarks:\n\t\tcount_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\n# Print output file headers\ndef print_headers(result_dir):\n\twith open(result_dir + 'cover_num_comms.result', 'w') as f:\n\t\tf.write(create_line(*CoverBenchmark.output_header(), 'Number of Communities'))\n\twith open(result_dir + 'cover_comm_sizes.result', 'w') as f:\n\t\tf.write(create_line(*CoverBenchmark.output_header(), 'Community Size', 'F1 Score'))\n\twith open(result_dir + 'cover_node_comms.result', 'w') as f:\n\t\tf.write(create_line(*CoverBenchmark.output_header(), 'Number of Communities per Node'))\n\n\n# Count the number of communities and their sizes\ndef count_benchmark_cover(result_dir, calc_f1, benchmark):\n\tcover = benchmark.get_cover()\n\tground_truth = benchmark.get_ground_truth()\n\tcomm_map = get_communities(benchmark.get_graph(), cover)\n\tgt_map = get_communities(benchmark.get_graph(), ground_truth)\n\tcomm_sizes = cover.subsetSizeMap()\n\n\t# Number of communities\n\twith open(result_dir + 'cover_num_comms.result', 'a') as f:\n\t\tf.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))\n\n\t# Community sizes and F1 scores\n\twith open(result_dir + 'cover_comm_sizes.result', 'a') as f:\n\t\tfor u in cover.getSubsetIds():\n\t\t\tcomm = comm_map[u]\n\t\t\tsize = comm_sizes[u]\n\t\t\tf1 = f1_score(comm, gt_map) if calc_f1 else 0\n\t\t\tf.write(create_line(*benchmark.output_line(), log2(size), f1))\n\n\t# Number of Communities per Node\n\twith open(result_dir + 'cover_node_comms.result', 'a') as f:\n\t\tfor u in benchmark.get_graph().nodes():\n\t\t\tnum_comms = 
len(cover.subsetsOf(u))\n\t\t\tif num_comms > 0:\n\t\t\t\tf.write(create_line(*benchmark.output_line(), log2(num_comms)))\n\n\ndef get_communities(graph, cover):\n\tcomm_map = defaultdict(lambda: set())\n\tfor u in graph.nodes():\n\t\tcomms = cover.subsetsOf(u)\n\t\tfor c in comms:\n\t\t\tcomm_map[c].add(u)\n\n\treturn comm_map\n\n\ndef f1_score(community, ground_truth):\n\tmax_f1 = 0.0\n\tfor gt_comm in ground_truth.values():\n\t\toverlap = len(gt_comm.intersection(community))\n\t\tif overlap == 0:\n\t\t\tcontinue\n\t\tprecision = overlap / len(community)\n\t\trecall = overlap / len(gt_comm)\n\t\tf1 = 2 * precision * recall / (precision + recall)\n\t\tmax_f1 = max(max_f1, f1)\n\n\treturn max_f1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
import tensorflow as tf
import numpy as np
import time
import os
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
from src.model import get_args
from src.funcs import linear
from src.youtubeface import load_ytf_data
from src.lfw import load_lfw_data
from src.facescrub import load_fs_data
from src.wrapper_basicImg import wrapper_basicImg
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
total_iteration = 300000
m = 512
q = 32
lam = 0.01
beta = 1.
margin = 0.5
s = 32
batch_size = 256
class_num = 1595
train_dataset = 'FS'
eval_dataset = "LFW"
args = get_args()
### Get image and label from tfrecord
image, label, iterator = {}, {}, {}
if train_dataset == 'YTF':
image['train'], label['train'], iterator['train'] = load_ytf_data(batch_size, 'train')
elif train_dataset == 'FS':
image['train'], label['train'], iterator['train'] = load_fs_data(batch_size, 'train')
else:
print("Select proper dataset")
### Get evaluation dataset. Wrapper
wrapper = wrapper_basicImg(dataset=eval_dataset)
if eval_dataset == 'YTF':
image['gallery'], label['gallery'], iterator['gallery'] = load_ytf_data(batch_size, 'train', eval=True)
image['test'], label['test'], iterator['test'] = load_ytf_data(batch_size, 'test')
elif eval_dataset == 'LFW':
image['gallery'], label['gallery'], iterator['gallery'] = load_lfw_data(batch_size, 'gallery')
image['test'], label['test'], iterator['test'] = load_lfw_data(batch_size, 'probe')
### Backbone network (Arcface)
embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512], dtype=tf.float32)
labels = tf.placeholder(name='label', shape=[None, ], dtype=tf.int32)
### Global step & learning rate
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.003
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, total_iteration, 0.96)
### My implementation (DIom algorithm)
with tf.variable_scope('DIom'):
fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')
fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')
fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')
h_k = tf.reshape(fc3, [-1, m, q])
h_k = tf.nn.softmax(beta * h_k, axis=2)
index_matrix = tf.range(1, q + 1, dtype=tf.float32)
h = tf.reduce_sum(h_k * index_matrix, axis=2)
h = tf.reshape(h, [-1, m])
h_norm = tf.math.l2_normalize(h, axis=1)
### Loss function
l = tf.one_hot(labels, class_num)
l = tf.matmul(l, tf.transpose(l))
l_float = tf.cast(l, tf.float32)
l = tf.reshape(tf.clip_by_value(l_float, 0., 1.), (-1, 1))
label_int = tf.cast(tf.squeeze(l, 1), tf.int32)
inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))
cos_t = tf.clip_by_value(inner_prod, -1., 1. - 1e-6)
theta = tf.math.acos(cos_t)
sin_t = tf.math.sin(theta)
cos_mt = tf.math.cos(theta + margin)
sin_mt = tf.math.sin(theta + margin)
logit = l * s * (tf.concat([sin_t, cos_mt], 1)) + (1 - l) * s * (tf.concat([sin_mt, cos_t], 1))
l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=label_int)
c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)
# Baseline pairwise-CE
# label_ce = tf.cast(labels, tf.float32)
# l_ij = l * tf.log(tf.square(inner_prod)) + (1 - l) * tf.log(tf.maximum(1e-6, 1 - tf.square(inner_prod)))
# l_ij = -tf.reduce_mean(l_ij)
# My novel cosine loss
l_ij = tf.reduce_mean(l_ij_logit)
c_ij = tf.reduce_mean(c_ij)
loss = l_ij + lam * c_ij
gradient = tf.gradients(loss, sin_t)
### Optimizer
t_vars = tf.global_variables()
train_vars = [var for var in t_vars if 'DIom' in var.name]
opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, var_list=train_vars, global_step=global_step)
with tf.Session() as sess:
tf.global_variables_initializer().run()
sess.run(iterator['train'].initializer)
### Training
iteration = sess.run(global_step)
t_opt = [opt_t, loss, l_ij, c_ij]
start_time = time.time()
while iteration != total_iteration:
img, lbl = sess.run([image['train'], label['train']])
train_dict = {
embedding_tensor: img,
labels: lbl
}
_, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=train_dict)
iteration += 1
if iteration % 10000 == 0:
### Evaluation after training
### Get gallery hash code
# gallery = []
# gallery_label = []
# sess.run(iterator['gallery'].initializer)
# try:
# while True:
# img, lbl = sess.run([image['gallery'], label['gallery']])
#
# gallery_dict = {
# embedding_tensor: img
# }
#
# hash_code = sess.run(h_norm, feed_dict=gallery_dict)
#
# if gallery == []:
# gallery = hash_code
# gallery_label = lbl
# else:
# gallery = np.concatenate((gallery, hash_code), axis=0)
# gallery_label = np.concatenate((gallery_label, lbl), axis=0)
#
# except tf.errors.OutOfRangeError:
# pass
#
# ### Get probe hash code
# probe = []
# probe_label = []
# code_arr = []
# sess.run(iterator['test'].initializer)
# try:
# while True:
# img, lbl = sess.run([image['test'], label['test']])
#
# gallery_dict = {
# embedding_tensor: img
# }
#
# code, hash_code = sess.run([h, h_norm], feed_dict=gallery_dict)
#
# if probe == []:
# probe = hash_code
# probe_label = lbl
# code_arr = code
# else:
# probe = np.concatenate((probe, hash_code), axis=0)
# probe_label = np.concatenate((probe_label, lbl), axis=0)
# code_arr = np.concatenate((code_arr, code), axis=0)
#
# except tf.errors.OutOfRangeError:
# pass
#
# ### Code frequency
# code_arr = np.around(code_arr)
# count_arr = []
# for i in range(q):
# count_arr.append(np.count_nonzero(code_arr == i + 1))
#
# plt.clf()
# plt.bar(range(1, q+1), count_arr)
# plt.savefig('./plt/code_' + str(iteration) + '.png')
# ### Calculate MAP
# gtp = 40
# k = 50
#
# distance = np.matmul(probe, gallery.T)
# arg_idx = np.argsort(-distance, axis=1)
#
# max_label = gallery_label[arg_idx[:, :k]]
# match_matrix = np.equal(max_label, probe_label[:,np.newaxis])
#
# tp_seen = match_matrix * np.cumsum(match_matrix, axis=1)
# ap = np.sum(tp_seen / np.arange(1, k + 1)[np.newaxis, :], axis=1) / gtp
# MAP = np.mean(ap)
### Calculate EER
dist_list = []
label_list = []
code_list = []
while wrapper.samples_left > 0:
imgs, lbls = wrapper.get_next_batch(100)
imgs = np.reshape(imgs, [-1, 512])
eer_dict = {
embedding_tensor: imgs
}
code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)
code = np.reshape(code, [-1, 2, m])
distance = np.sum(np.prod(code, axis=1), axis=1)
if dist_list == []:
dist_list = distance
label_list = lbls
code_list = int_code
else:
dist_list = np.concatenate((dist_list, distance), axis=0)
label_list = np.concatenate((label_list, lbls), axis=0)
code_list = np.concatenate((code_list, int_code), axis=0)
wrapper.samples_left= np.size(wrapper.labels, axis=0)
wrapper.next_batch_pointer = 0
fpr, tpr, threshold = roc_curve(label_list, dist_list, pos_label=1)
fnr = 1 - tpr
# eer_threshold = threshold(np.nanargmin(np.absolute((fnr - fpr))))
eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
### Code frequency
code_arr = np.around(code_list)
count_arr = []
for i in range(q):
count_arr.append(np.count_nonzero(code_arr == i + 1))
plt.clf()
plt.bar(range(1, q + 1), count_arr)
plt.savefig('./plt/code_' + str(iteration) + '.png')
time_taken = time.time() - start_time
MAP = 0
# print("good")
print("[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f"
% (iteration, train_loss, loss_l, loss_c, MAP, eer, time_taken))
start_time = time.time()
# np.save('CP.npy', np.concatenate((fpr[np.newaxis, :], tpr[np.newaxis, :]), axis=0))
### Save model.
# save_vars = [var for var in t_vars if 'DIom' in var.name]
# saver = tf.train.Saver(var_list=save_vars)
# saver.save(sess, './model/DIom_layer')
|
normal
|
{
"blob_id": "459dd9302f7100ad02119cc94b735b19287f21e5",
"index": 5956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n total_iteration = 300000\n m = 512\n q = 32\n lam = 0.01\n beta = 1.0\n margin = 0.5\n s = 32\n batch_size = 256\n class_num = 1595\n train_dataset = 'FS'\n eval_dataset = 'LFW'\n args = get_args()\n image, label, iterator = {}, {}, {}\n if train_dataset == 'YTF':\n image['train'], label['train'], iterator['train'] = load_ytf_data(\n batch_size, 'train')\n elif train_dataset == 'FS':\n image['train'], label['train'], iterator['train'] = load_fs_data(\n batch_size, 'train')\n else:\n print('Select proper dataset')\n wrapper = wrapper_basicImg(dataset=eval_dataset)\n if eval_dataset == 'YTF':\n image['gallery'], label['gallery'], iterator['gallery'\n ] = load_ytf_data(batch_size, 'train', eval=True)\n image['test'], label['test'], iterator['test'] = load_ytf_data(\n batch_size, 'test')\n elif eval_dataset == 'LFW':\n image['gallery'], label['gallery'], iterator['gallery'\n ] = load_lfw_data(batch_size, 'gallery')\n image['test'], label['test'], iterator['test'] = load_lfw_data(\n batch_size, 'probe')\n embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512],\n dtype=tf.float32)\n labels = tf.placeholder(name='label', shape=[None], dtype=tf.int32)\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = 0.003\n learning_rate = tf.train.exponential_decay(starter_learning_rate,\n global_step, total_iteration, 0.96)\n with tf.variable_scope('DIom'):\n fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')\n fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')\n fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')\n h_k = tf.reshape(fc3, [-1, m, q])\n h_k = tf.nn.softmax(beta * h_k, axis=2)\n index_matrix = tf.range(1, q + 1, dtype=tf.float32)\n h = tf.reduce_sum(h_k * index_matrix, axis=2)\n h = tf.reshape(h, [-1, m])\n h_norm = tf.math.l2_normalize(h, axis=1)\n l = tf.one_hot(labels, class_num)\n l = tf.matmul(l, tf.transpose(l))\n l_float = tf.cast(l, 
tf.float32)\n l = tf.reshape(tf.clip_by_value(l_float, 0.0, 1.0), (-1, 1))\n label_int = tf.cast(tf.squeeze(l, 1), tf.int32)\n inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))\n cos_t = tf.clip_by_value(inner_prod, -1.0, 1.0 - 1e-06)\n theta = tf.math.acos(cos_t)\n sin_t = tf.math.sin(theta)\n cos_mt = tf.math.cos(theta + margin)\n sin_mt = tf.math.sin(theta + margin)\n logit = l * s * tf.concat([sin_t, cos_mt], 1) + (1 - l) * s * tf.concat([\n sin_mt, cos_t], 1)\n l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=\n logit, labels=label_int)\n c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)\n l_ij = tf.reduce_mean(l_ij_logit)\n c_ij = tf.reduce_mean(c_ij)\n loss = l_ij + lam * c_ij\n gradient = tf.gradients(loss, sin_t)\n t_vars = tf.global_variables()\n train_vars = [var for var in t_vars if 'DIom' in var.name]\n opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(\n loss, var_list=train_vars, global_step=global_step)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n sess.run(iterator['train'].initializer)\n iteration = sess.run(global_step)\n t_opt = [opt_t, loss, l_ij, c_ij]\n start_time = time.time()\n while iteration != total_iteration:\n img, lbl = sess.run([image['train'], label['train']])\n train_dict = {embedding_tensor: img, labels: lbl}\n _, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=\n train_dict)\n iteration += 1\n if iteration % 10000 == 0:\n dist_list = []\n label_list = []\n code_list = []\n while wrapper.samples_left > 0:\n imgs, lbls = wrapper.get_next_batch(100)\n imgs = np.reshape(imgs, [-1, 512])\n eer_dict = {embedding_tensor: imgs}\n code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)\n code = np.reshape(code, [-1, 2, m])\n distance = np.sum(np.prod(code, axis=1), axis=1)\n if dist_list == []:\n dist_list = distance\n label_list = lbls\n code_list = int_code\n else:\n dist_list = np.concatenate((dist_list, distance),\n axis=0)\n 
label_list = np.concatenate((label_list, lbls), axis=0)\n code_list = np.concatenate((code_list, int_code),\n axis=0)\n wrapper.samples_left = np.size(wrapper.labels, axis=0)\n wrapper.next_batch_pointer = 0\n fpr, tpr, threshold = roc_curve(label_list, dist_list,\n pos_label=1)\n fnr = 1 - tpr\n eer = fpr[np.nanargmin(np.absolute(fnr - fpr))]\n code_arr = np.around(code_list)\n count_arr = []\n for i in range(q):\n count_arr.append(np.count_nonzero(code_arr == i + 1))\n plt.clf()\n plt.bar(range(1, q + 1), count_arr)\n plt.savefig('./plt/code_' + str(iteration) + '.png')\n time_taken = time.time() - start_time\n MAP = 0\n print(\n '[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f'\n % (iteration, train_loss, loss_l, loss_c, MAP, eer,\n time_taken))\n start_time = time.time()\n",
"step-3": "import tensorflow as tf\nimport numpy as np\nimport time\nimport os\nfrom sklearn.metrics import roc_curve\nimport matplotlib.pyplot as plt\nfrom src.model import get_args\nfrom src.funcs import linear\nfrom src.youtubeface import load_ytf_data\nfrom src.lfw import load_lfw_data\nfrom src.facescrub import load_fs_data\nfrom src.wrapper_basicImg import wrapper_basicImg\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n total_iteration = 300000\n m = 512\n q = 32\n lam = 0.01\n beta = 1.0\n margin = 0.5\n s = 32\n batch_size = 256\n class_num = 1595\n train_dataset = 'FS'\n eval_dataset = 'LFW'\n args = get_args()\n image, label, iterator = {}, {}, {}\n if train_dataset == 'YTF':\n image['train'], label['train'], iterator['train'] = load_ytf_data(\n batch_size, 'train')\n elif train_dataset == 'FS':\n image['train'], label['train'], iterator['train'] = load_fs_data(\n batch_size, 'train')\n else:\n print('Select proper dataset')\n wrapper = wrapper_basicImg(dataset=eval_dataset)\n if eval_dataset == 'YTF':\n image['gallery'], label['gallery'], iterator['gallery'\n ] = load_ytf_data(batch_size, 'train', eval=True)\n image['test'], label['test'], iterator['test'] = load_ytf_data(\n batch_size, 'test')\n elif eval_dataset == 'LFW':\n image['gallery'], label['gallery'], iterator['gallery'\n ] = load_lfw_data(batch_size, 'gallery')\n image['test'], label['test'], iterator['test'] = load_lfw_data(\n batch_size, 'probe')\n embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512],\n dtype=tf.float32)\n labels = tf.placeholder(name='label', shape=[None], dtype=tf.int32)\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = 0.003\n learning_rate = tf.train.exponential_decay(starter_learning_rate,\n global_step, total_iteration, 0.96)\n with tf.variable_scope('DIom'):\n fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')\n fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')\n fc3 = linear(tf.nn.relu(fc2), m * q, 
'fc3')\n h_k = tf.reshape(fc3, [-1, m, q])\n h_k = tf.nn.softmax(beta * h_k, axis=2)\n index_matrix = tf.range(1, q + 1, dtype=tf.float32)\n h = tf.reduce_sum(h_k * index_matrix, axis=2)\n h = tf.reshape(h, [-1, m])\n h_norm = tf.math.l2_normalize(h, axis=1)\n l = tf.one_hot(labels, class_num)\n l = tf.matmul(l, tf.transpose(l))\n l_float = tf.cast(l, tf.float32)\n l = tf.reshape(tf.clip_by_value(l_float, 0.0, 1.0), (-1, 1))\n label_int = tf.cast(tf.squeeze(l, 1), tf.int32)\n inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))\n cos_t = tf.clip_by_value(inner_prod, -1.0, 1.0 - 1e-06)\n theta = tf.math.acos(cos_t)\n sin_t = tf.math.sin(theta)\n cos_mt = tf.math.cos(theta + margin)\n sin_mt = tf.math.sin(theta + margin)\n logit = l * s * tf.concat([sin_t, cos_mt], 1) + (1 - l) * s * tf.concat([\n sin_mt, cos_t], 1)\n l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=\n logit, labels=label_int)\n c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)\n l_ij = tf.reduce_mean(l_ij_logit)\n c_ij = tf.reduce_mean(c_ij)\n loss = l_ij + lam * c_ij\n gradient = tf.gradients(loss, sin_t)\n t_vars = tf.global_variables()\n train_vars = [var for var in t_vars if 'DIom' in var.name]\n opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(\n loss, var_list=train_vars, global_step=global_step)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n sess.run(iterator['train'].initializer)\n iteration = sess.run(global_step)\n t_opt = [opt_t, loss, l_ij, c_ij]\n start_time = time.time()\n while iteration != total_iteration:\n img, lbl = sess.run([image['train'], label['train']])\n train_dict = {embedding_tensor: img, labels: lbl}\n _, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=\n train_dict)\n iteration += 1\n if iteration % 10000 == 0:\n dist_list = []\n label_list = []\n code_list = []\n while wrapper.samples_left > 0:\n imgs, lbls = wrapper.get_next_batch(100)\n imgs = np.reshape(imgs, [-1, 
512])\n eer_dict = {embedding_tensor: imgs}\n code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)\n code = np.reshape(code, [-1, 2, m])\n distance = np.sum(np.prod(code, axis=1), axis=1)\n if dist_list == []:\n dist_list = distance\n label_list = lbls\n code_list = int_code\n else:\n dist_list = np.concatenate((dist_list, distance),\n axis=0)\n label_list = np.concatenate((label_list, lbls), axis=0)\n code_list = np.concatenate((code_list, int_code),\n axis=0)\n wrapper.samples_left = np.size(wrapper.labels, axis=0)\n wrapper.next_batch_pointer = 0\n fpr, tpr, threshold = roc_curve(label_list, dist_list,\n pos_label=1)\n fnr = 1 - tpr\n eer = fpr[np.nanargmin(np.absolute(fnr - fpr))]\n code_arr = np.around(code_list)\n count_arr = []\n for i in range(q):\n count_arr.append(np.count_nonzero(code_arr == i + 1))\n plt.clf()\n plt.bar(range(1, q + 1), count_arr)\n plt.savefig('./plt/code_' + str(iteration) + '.png')\n time_taken = time.time() - start_time\n MAP = 0\n print(\n '[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f'\n % (iteration, train_loss, loss_l, loss_c, MAP, eer,\n time_taken))\n start_time = time.time()\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport time\nimport os\nfrom sklearn.metrics import roc_curve\nimport matplotlib.pyplot as plt\n\n\nfrom src.model import get_args\nfrom src.funcs import linear\nfrom src.youtubeface import load_ytf_data\nfrom src.lfw import load_lfw_data\nfrom src.facescrub import load_fs_data\nfrom src.wrapper_basicImg import wrapper_basicImg\n\n\n\n\nif __name__ == '__main__':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n total_iteration = 300000\n m = 512\n q = 32\n lam = 0.01\n beta = 1.\n margin = 0.5\n s = 32\n batch_size = 256\n class_num = 1595\n train_dataset = 'FS'\n eval_dataset = \"LFW\"\n args = get_args()\n\n ### Get image and label from tfrecord\n image, label, iterator = {}, {}, {}\n if train_dataset == 'YTF':\n image['train'], label['train'], iterator['train'] = load_ytf_data(batch_size, 'train')\n\n elif train_dataset == 'FS':\n image['train'], label['train'], iterator['train'] = load_fs_data(batch_size, 'train')\n\n else:\n print(\"Select proper dataset\")\n\n ### Get evaluation dataset. 
Wrapper\n wrapper = wrapper_basicImg(dataset=eval_dataset)\n if eval_dataset == 'YTF':\n image['gallery'], label['gallery'], iterator['gallery'] = load_ytf_data(batch_size, 'train', eval=True)\n image['test'], label['test'], iterator['test'] = load_ytf_data(batch_size, 'test')\n\n elif eval_dataset == 'LFW':\n image['gallery'], label['gallery'], iterator['gallery'] = load_lfw_data(batch_size, 'gallery')\n image['test'], label['test'], iterator['test'] = load_lfw_data(batch_size, 'probe')\n\n\n ### Backbone network (Arcface)\n embedding_tensor = tf.placeholder(name='img_inputs', shape=[None, 512], dtype=tf.float32)\n labels = tf.placeholder(name='label', shape=[None, ], dtype=tf.int32)\n\n ### Global step & learning rate\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = 0.003\n learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, total_iteration, 0.96)\n\n ### My implementation (DIom algorithm)\n with tf.variable_scope('DIom'):\n fc1 = linear(tf.nn.relu(embedding_tensor), 1024, 'fc1')\n fc2 = linear(tf.nn.relu(fc1), 1024, 'fc2')\n fc3 = linear(tf.nn.relu(fc2), m * q, 'fc3')\n\n h_k = tf.reshape(fc3, [-1, m, q])\n h_k = tf.nn.softmax(beta * h_k, axis=2)\n\n index_matrix = tf.range(1, q + 1, dtype=tf.float32)\n h = tf.reduce_sum(h_k * index_matrix, axis=2)\n h = tf.reshape(h, [-1, m])\n h_norm = tf.math.l2_normalize(h, axis=1)\n\n ### Loss function\n l = tf.one_hot(labels, class_num)\n l = tf.matmul(l, tf.transpose(l))\n l_float = tf.cast(l, tf.float32)\n l = tf.reshape(tf.clip_by_value(l_float, 0., 1.), (-1, 1))\n label_int = tf.cast(tf.squeeze(l, 1), tf.int32)\n\n inner_prod = tf.reshape(tf.matmul(h_norm, tf.transpose(h_norm)), (-1, 1))\n cos_t = tf.clip_by_value(inner_prod, -1., 1. 
- 1e-6)\n theta = tf.math.acos(cos_t)\n\n sin_t = tf.math.sin(theta)\n cos_mt = tf.math.cos(theta + margin)\n sin_mt = tf.math.sin(theta + margin)\n\n logit = l * s * (tf.concat([sin_t, cos_mt], 1)) + (1 - l) * s * (tf.concat([sin_mt, cos_t], 1))\n\n l_ij_logit = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=label_int)\n c_ij = tf.abs(tf.reduce_mean(h, axis=0) - (q + 1) / 2)\n\n # Baseline pairwise-CE\n # label_ce = tf.cast(labels, tf.float32)\n # l_ij = l * tf.log(tf.square(inner_prod)) + (1 - l) * tf.log(tf.maximum(1e-6, 1 - tf.square(inner_prod)))\n # l_ij = -tf.reduce_mean(l_ij)\n\n # My novel cosine loss\n l_ij = tf.reduce_mean(l_ij_logit)\n c_ij = tf.reduce_mean(c_ij)\n\n loss = l_ij + lam * c_ij\n\n gradient = tf.gradients(loss, sin_t)\n\n ### Optimizer\n t_vars = tf.global_variables()\n train_vars = [var for var in t_vars if 'DIom' in var.name]\n\n\n opt_t = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, var_list=train_vars, global_step=global_step)\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n sess.run(iterator['train'].initializer)\n\n ### Training\n iteration = sess.run(global_step)\n t_opt = [opt_t, loss, l_ij, c_ij]\n start_time = time.time()\n while iteration != total_iteration:\n img, lbl = sess.run([image['train'], label['train']])\n\n train_dict = {\n embedding_tensor: img,\n labels: lbl\n }\n\n _, train_loss, loss_l, loss_c = sess.run(t_opt, feed_dict=train_dict)\n iteration += 1\n\n if iteration % 10000 == 0:\n ### Evaluation after training\n ### Get gallery hash code\n # gallery = []\n # gallery_label = []\n # sess.run(iterator['gallery'].initializer)\n # try:\n # while True:\n # img, lbl = sess.run([image['gallery'], label['gallery']])\n #\n # gallery_dict = {\n # embedding_tensor: img\n # }\n #\n # hash_code = sess.run(h_norm, feed_dict=gallery_dict)\n #\n # if gallery == []:\n # gallery = hash_code\n # gallery_label = lbl\n # else:\n # gallery = 
np.concatenate((gallery, hash_code), axis=0)\n # gallery_label = np.concatenate((gallery_label, lbl), axis=0)\n #\n # except tf.errors.OutOfRangeError:\n # pass\n #\n # ### Get probe hash code\n # probe = []\n # probe_label = []\n # code_arr = []\n # sess.run(iterator['test'].initializer)\n # try:\n # while True:\n # img, lbl = sess.run([image['test'], label['test']])\n #\n # gallery_dict = {\n # embedding_tensor: img\n # }\n #\n # code, hash_code = sess.run([h, h_norm], feed_dict=gallery_dict)\n #\n # if probe == []:\n # probe = hash_code\n # probe_label = lbl\n # code_arr = code\n # else:\n # probe = np.concatenate((probe, hash_code), axis=0)\n # probe_label = np.concatenate((probe_label, lbl), axis=0)\n # code_arr = np.concatenate((code_arr, code), axis=0)\n #\n # except tf.errors.OutOfRangeError:\n # pass\n #\n # ### Code frequency\n # code_arr = np.around(code_arr)\n # count_arr = []\n # for i in range(q):\n # count_arr.append(np.count_nonzero(code_arr == i + 1))\n #\n # plt.clf()\n # plt.bar(range(1, q+1), count_arr)\n # plt.savefig('./plt/code_' + str(iteration) + '.png')\n\n # ### Calculate MAP\n # gtp = 40\n # k = 50\n #\n # distance = np.matmul(probe, gallery.T)\n # arg_idx = np.argsort(-distance, axis=1)\n #\n # max_label = gallery_label[arg_idx[:, :k]]\n # match_matrix = np.equal(max_label, probe_label[:,np.newaxis])\n #\n # tp_seen = match_matrix * np.cumsum(match_matrix, axis=1)\n # ap = np.sum(tp_seen / np.arange(1, k + 1)[np.newaxis, :], axis=1) / gtp\n # MAP = np.mean(ap)\n\n ### Calculate EER\n dist_list = []\n label_list = []\n code_list = []\n while wrapper.samples_left > 0:\n imgs, lbls = wrapper.get_next_batch(100)\n\n imgs = np.reshape(imgs, [-1, 512])\n\n eer_dict = {\n embedding_tensor: imgs\n }\n\n code, int_code = sess.run([h_norm, h], feed_dict=eer_dict)\n code = np.reshape(code, [-1, 2, m])\n\n distance = np.sum(np.prod(code, axis=1), axis=1)\n\n if dist_list == []:\n dist_list = distance\n label_list = lbls\n code_list = int_code\n\n 
else:\n dist_list = np.concatenate((dist_list, distance), axis=0)\n label_list = np.concatenate((label_list, lbls), axis=0)\n code_list = np.concatenate((code_list, int_code), axis=0)\n\n wrapper.samples_left= np.size(wrapper.labels, axis=0)\n wrapper.next_batch_pointer = 0\n\n fpr, tpr, threshold = roc_curve(label_list, dist_list, pos_label=1)\n fnr = 1 - tpr\n # eer_threshold = threshold(np.nanargmin(np.absolute((fnr - fpr))))\n eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]\n\n ### Code frequency\n code_arr = np.around(code_list)\n count_arr = []\n for i in range(q):\n count_arr.append(np.count_nonzero(code_arr == i + 1))\n\n plt.clf()\n plt.bar(range(1, q + 1), count_arr)\n plt.savefig('./plt/code_' + str(iteration) + '.png')\n\n time_taken = time.time() - start_time\n MAP = 0\n # print(\"good\")\n print(\"[Iteration %d] Train Loss: %.4f, Loss_l: %.4f, Loss_c: %.4f, MAP: %.4f, EER: %.4f, Taken time: %.4f\"\n % (iteration, train_loss, loss_l, loss_c, MAP, eer, time_taken))\n\n start_time = time.time()\n\n # np.save('CP.npy', np.concatenate((fpr[np.newaxis, :], tpr[np.newaxis, :]), axis=0))\n ### Save model.\n # save_vars = [var for var in t_vars if 'DIom' in var.name]\n # saver = tf.train.Saver(var_list=save_vars)\n # saver.save(sess, './model/DIom_layer')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class LogisticRegression:
<|reserved_special_token_0|>
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
<|reserved_special_token_0|>
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert w.shape == (dim, 1)
assert isinstance(b, float) or isinstance(b, int)
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert dw.shape == w.shape
assert db.dtype == float
cost = np.squeeze(cost)
assert cost.shape == ()
grads = {'dw': dw, 'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter + 1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f' % (i, cost))
return w, b, costs
<|reserved_special_token_0|>
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
pre = np.zeros_like(proba, dtype=np.int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LogisticRegression:
<|reserved_special_token_0|>
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert w.shape == (dim, 1)
assert isinstance(b, float) or isinstance(b, int)
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert dw.shape == w.shape
assert db.dtype == float
cost = np.squeeze(cost)
assert cost.shape == ()
grads = {'dw': dw, 'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter + 1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f' % (i, cost))
return w, b, costs
def fit(self, X, Y, print_cost=False):
print('Fit starting:')
w, b = self.initialize_with_zeros(X.shape[1])
iter_time = 0
self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.
max_iter, self.learning_rate, print_cost)
print('Fit complished!')
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
pre = np.zeros_like(proba, dtype=np.int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
def score(self, X_test, Y_test):
Y_pre = self.predict(X_test)
score = np.sum(Y_pre == Y_test) / len(Y_pre)
return score
def __str__(self):
return ('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.use('TkAgg')
<|reserved_special_token_0|>
class LogisticRegression:
"""LogisticRegression for binary classification
max_iter: the maximum iteration times for training
learning_rate: learing rate for gradiend decsend training
Input's shape should be [sample_nums, data_dims]
attrs:
max_iter
learning_rate
(after fit)
w
b
costs
methods:
fit
predict
predict_proba
score
"""
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert w.shape == (dim, 1)
assert isinstance(b, float) or isinstance(b, int)
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert dw.shape == w.shape
assert db.dtype == float
cost = np.squeeze(cost)
assert cost.shape == ()
grads = {'dw': dw, 'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter + 1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f' % (i, cost))
return w, b, costs
def fit(self, X, Y, print_cost=False):
print('Fit starting:')
w, b = self.initialize_with_zeros(X.shape[1])
iter_time = 0
self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.
max_iter, self.learning_rate, print_cost)
print('Fit complished!')
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
pre = np.zeros_like(proba, dtype=np.int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
def score(self, X_test, Y_test):
Y_pre = self.predict(X_test)
score = np.sum(Y_pre == Y_test) / len(Y_pre)
return score
def __str__(self):
return ('LogisticRegression Model(learning_rate={}, max_iteration={})'
.format(self.learning_rate, self.max_iter))
if __name__ == '__main__':
# Demo/visual check: train on two iris features (classes 0 vs 1), report
# train/test accuracy, then plot the decision line and the loss curve.
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
# The first 100 iris samples are classes 0 and 1; keep only columns [0, 1].
x = data.data[:100, [0, 1]]
y = np.array([(1 if i > 0 else 0) for i in data.target[:100]])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
model = LogisticRegression()
# Labels must be a column vector for the broadcasting inside propagate().
model.fit(x_train, y_train.reshape(len(y_train), -1), True)
print('Train Score:{}'.format(model.score(x_train, y_train)))
print('Test Score:{}'.format(model.score(x_test, y_test)))
plt.subplot(211)
# Decision boundary: w0*x + w1*y + b = 0, solved for y over the x range.
x_samples = np.linspace(4, 7, 500)
y_samples = (-model.b - model.w[0] * x_samples) / model.w[1]
plt.plot(x_samples, y_samples, 'r')
plt.scatter(x[:50, 0], x[:50, 1], label='negative')
plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')
# NOTE(review): the labels say "petal" but columns [0, 1] of load_iris()
# are sepal length/width -- confirm the intended features.
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.title('LosRes Results on iris datasets')
plt.legend()
plt.subplots_adjust(hspace=0.5, wspace=0.25)
plt.subplot(212)
# Cost history was sampled once every 100 gradient steps during fit().
plt.plot(range(len(model.costs)), model.costs, '-o')
plt.xlabel('steps')
plt.ylabel('loss')
plt.title('loss function')
plt.show()
<|reserved_special_token_1|>
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
class LogisticRegression:
    """Logistic regression for binary classification, trained by batch
    gradient descent.

    Inputs to fit/predict are shaped [sample_nums, data_dims]; labels
    passed to fit are a column vector [sample_nums, 1].

    attrs:
        max_iter, learning_rate
        (after fit) w, b, costs
    methods:
        fit, predict, predict_proba, score
    """

    def __init__(self, max_iter=2000, learning_rate=0.01):
        self.max_iter = max_iter
        self.learning_rate = learning_rate
        print('LogisticRegression Model(learning_rate={}, max_iteration={})'
              .format(self.learning_rate, self.max_iter))

    def sigmoid(self, z):
        """Elementwise logistic function, mapping reals into (0, 1)."""
        return 1 / (1 + np.exp(-z))

    def initialize_with_zeros(self, dim):
        """Zeroed (dim, 1) weight vector plus scalar bias.

        FIX: the original's trivially-true asserts (misused for
        validation; stripped under -O) were removed.
        """
        return np.zeros((dim, 1)), 0

    def propagate(self, w, b, X, Y):
        """Forward/backward pass: mean cross-entropy cost and gradients."""
        m = X.shape[0]
        A = self.sigmoid(np.dot(X, w) + b)
        cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
        dw = 1 / m * np.dot(X.T, A - Y)
        db = 1 / m * np.sum(A - Y)
        grads = {'dw': dw, 'db': db}
        return grads, np.squeeze(cost)

    def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
        """Gradient descent; samples the cost once every 100 iterations."""
        costs = []
        for i in range(1, max_iter + 1):
            grads, cost = self.propagate(w, b, X, Y)
            w -= learning_rate * grads['dw']
            b -= learning_rate * grads['db']
            if i % 100 == 0:
                costs.append(cost)
                if print_cost:
                    print('Cost after iteration %i: %f' % (i, cost))
        return w, b, costs

    def fit(self, X, Y, print_cost=False):
        """Train on X (m, dim) and column-vector labels Y (m, 1); stores
        self.w, self.b and self.costs."""
        print('Fit starting:')
        w, b = self.initialize_with_zeros(X.shape[1])
        # FIX: dropped the unused local `iter_time`.
        self.w, self.b, self.costs = self.optimize(
            w, b, X, Y, self.max_iter, self.learning_rate, print_cost)
        # FIX: corrected the "complished" typo in the user-facing message.
        print('Fit completed!')

    def predict_proba(self, X):
        """Positive-class probability for each row of X."""
        return self.sigmoid(np.dot(X, self.w) + self.b)

    def predict(self, X):
        """Hard 0/1 labels (threshold 0.5), squeezed to 1-D."""
        proba = self.predict_proba(X)
        # FIX: `np.int` was removed in NumPy 1.24; use the builtin `int`.
        pre = np.zeros_like(proba, dtype=int)
        pre[proba > 0.5] = 1
        return np.squeeze(pre)

    def score(self, X_test, Y_test):
        """Accuracy of predictions against 1-D ground-truth labels."""
        Y_pre = self.predict(X_test)
        return np.sum(Y_pre == Y_test) / len(Y_pre)

    def __str__(self):
        return ('LogisticRegression Model(learning_rate={}, max_iteration={})'
                .format(self.learning_rate, self.max_iter))
if __name__ == '__main__':
# Demo/visual check: train on two iris features (classes 0 vs 1), report
# train/test accuracy, then plot the decision line and the loss curve.
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
# The first 100 iris samples are classes 0 and 1; keep only columns [0, 1].
x = data.data[:100, [0, 1]]
y = np.array([(1 if i > 0 else 0) for i in data.target[:100]])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
model = LogisticRegression()
# Labels must be a column vector for the broadcasting inside propagate().
model.fit(x_train, y_train.reshape(len(y_train), -1), True)
print('Train Score:{}'.format(model.score(x_train, y_train)))
print('Test Score:{}'.format(model.score(x_test, y_test)))
plt.subplot(211)
# Decision boundary: w0*x + w1*y + b = 0, solved for y over the x range.
x_samples = np.linspace(4, 7, 500)
y_samples = (-model.b - model.w[0] * x_samples) / model.w[1]
plt.plot(x_samples, y_samples, 'r')
plt.scatter(x[:50, 0], x[:50, 1], label='negative')
plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')
# NOTE(review): the labels say "petal" but columns [0, 1] of load_iris()
# are sepal length/width -- confirm the intended features.
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.title('LosRes Results on iris datasets')
plt.legend()
plt.subplots_adjust(hspace=0.5, wspace=0.25)
plt.subplot(212)
# Cost history was sampled once every 100 gradient steps during fit().
plt.plot(range(len(model.costs)), model.costs, '-o')
plt.xlabel('steps')
plt.ylabel('loss')
plt.title('loss function')
plt.show()
<|reserved_special_token_1|>
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
class LogisticRegression:
    """Logistic regression for binary classification via batch gradient
    descent.

    fit/predict expect inputs shaped [sample_nums, data_dims]; fit labels
    are a column vector [sample_nums, 1].

    attrs:
        max_iter, learning_rate
        (after fit) w, b, costs
    methods:
        fit, predict, predict_proba, score
    """

    def __init__(self, max_iter=2000, learning_rate=0.01):
        self.max_iter = max_iter
        self.learning_rate = learning_rate
        print('LogisticRegression Model(learning_rate={}, max_iteration={})'.format(
            self.learning_rate, self.max_iter))

    def sigmoid(self, z):
        """Elementwise logistic function."""
        return 1 / (1 + np.exp(-z))

    def initialize_with_zeros(self, dim):
        """Zeroed (dim, 1) weight vector and scalar bias.

        FIX: removed the original's trivially-true asserts (validation via
        assert is stripped under -O).
        """
        return np.zeros((dim, 1)), 0

    def propagate(self, w, b, X, Y):
        """One forward/backward pass: mean cross-entropy cost + gradients."""
        m = X.shape[0]
        A = self.sigmoid(np.dot(X, w) + b)
        cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
        dw = 1 / m * np.dot(X.T, A - Y)
        db = 1 / m * np.sum(A - Y)
        return {'dw': dw, 'db': db}, np.squeeze(cost)

    def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
        """Gradient-descent loop; records the cost every 100 iterations."""
        costs = []
        for i in range(1, max_iter + 1):
            grads, cost = self.propagate(w, b, X, Y)
            w -= learning_rate * grads['dw']
            b -= learning_rate * grads['db']
            if i % 100 == 0:
                costs.append(cost)
                if print_cost:
                    print('Cost after iteration %i: %f' % (i, cost))
        return w, b, costs

    def fit(self, X, Y, print_cost=False):
        """Train the model; stores self.w, self.b and self.costs."""
        print('Fit starting:')
        w, b = self.initialize_with_zeros(X.shape[1])
        # FIX: dropped the unused local `iter_time`.
        self.w, self.b, self.costs = self.optimize(
            w, b, X, Y, self.max_iter, self.learning_rate, print_cost)
        # FIX: corrected the "complished" typo in the user-facing message.
        print('Fit completed!')

    def predict_proba(self, X):
        """Positive-class probability for each row of X."""
        return self.sigmoid(np.dot(X, self.w) + self.b)

    def predict(self, X):
        """Hard 0/1 labels (threshold 0.5), squeezed to 1-D."""
        proba = self.predict_proba(X)
        # FIX: `np.int` was removed in NumPy 1.24; use the builtin `int`.
        pre = np.zeros_like(proba, dtype=int)
        pre[proba > 0.5] = 1
        return np.squeeze(pre)

    def score(self, X_test, Y_test):
        """Accuracy of predictions against 1-D ground-truth labels."""
        Y_pre = self.predict(X_test)
        return np.sum(Y_pre == Y_test) / len(Y_pre)

    def __str__(self):
        return 'LogisticRegression Model(learning_rate={}, max_iteration={})'.format(
            self.learning_rate, self.max_iter)
if __name__ == '__main__':
# Demo/visual check: train on two iris features (classes 0 vs 1), report
# train/test accuracy, then plot the decision line and the loss curve.
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
# The first 100 iris samples are classes 0 and 1; keep only columns [0, 1].
x = data.data[:100, [0,1]]
# Leftover experiment: prepend a bias column instead of a separate b term.
# x = np.hstack([np.ones((100, 1)), x])
y = np.array([1 if i > 0 else 0 for i in data.target[:100]])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
model = LogisticRegression()
# Labels must be a column vector for the broadcasting inside propagate().
model.fit(x_train, y_train.reshape(len(y_train), -1), True)
print('Train Score:{}'.format(model.score(x_train, y_train)))
print('Test Score:{}'.format(model.score(x_test, y_test)))
plt.subplot(211)
# Decision boundary: w0*x + w1*y + b = 0, solved for y over the x range.
x_samples = np.linspace(4, 7, 500)
y_samples = (- model.b - model.w[0]*x_samples) / model.w[1]
plt.plot(x_samples, y_samples, 'r')
plt.scatter(x[:50, 0], x[:50, 1], label='negative')
plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')
# NOTE(review): the labels say "petal" but columns [0, 1] of load_iris()
# are sepal length/width -- confirm the intended features.
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.title('LosRes Results on iris datasets')
plt.legend()
plt.subplots_adjust(hspace=0.5,wspace=0.25)
plt.subplot(212)
# Cost history was sampled once every 100 gradient steps during fit().
plt.plot(range(len(model.costs)), model.costs, '-o')
plt.xlabel('steps')
plt.ylabel('loss')
plt.title('loss function')
plt.show()
|
flexible
|
{
"blob_id": "1dd62264aafe8ee745a3cfdfb994ac6a40c1af42",
"index": 1848,
"step-1": "<mask token>\n\n\nclass LogisticRegression:\n <mask token>\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n <mask token>\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n assert w.shape == (dim, 1)\n assert isinstance(b, float) or isinstance(b, int)\n return w, b\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y)\n assert dw.shape == w.shape\n assert db.dtype == float\n cost = np.squeeze(cost)\n assert cost.shape == ()\n grads = {'dw': dw, 'db': db}\n return grads, cost\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter + 1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f' % (i, cost))\n return w, b, costs\n <mask token>\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LogisticRegression:\n <mask token>\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n assert w.shape == (dim, 1)\n assert isinstance(b, float) or isinstance(b, int)\n return w, b\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y)\n assert dw.shape == w.shape\n assert db.dtype == float\n cost = np.squeeze(cost)\n assert cost.shape == ()\n grads = {'dw': dw, 'db': db}\n return grads, cost\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter + 1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f' % (i, cost))\n return w, b, costs\n\n def fit(self, X, Y, print_cost=False):\n print('Fit starting:')\n w, b = self.initialize_with_zeros(X.shape[1])\n iter_time = 0\n self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.\n max_iter, self.learning_rate, print_cost)\n print('Fit complished!')\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n\n def score(self, X_test, Y_test):\n Y_pre = self.predict(X_test)\n score = np.sum(Y_pre == Y_test) / len(Y_pre)\n return score\n\n def __str__(self):\n return ('LogisticRegression 
Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n\n<mask token>\n",
"step-3": "<mask token>\nmatplotlib.use('TkAgg')\n<mask token>\n\n\nclass LogisticRegression:\n \"\"\"LogisticRegression for binary classification\n \n max_iter: the maximum iteration times for training\n learning_rate: learing rate for gradiend decsend training\n\n Input's shape should be [sample_nums, data_dims]\n\n attrs:\n max_iter\n learning_rate\n (after fit)\n w\n b\n costs\n\n methods:\n fit\n predict\n predict_proba\n score \n \"\"\"\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n assert w.shape == (dim, 1)\n assert isinstance(b, float) or isinstance(b, int)\n return w, b\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y)\n assert dw.shape == w.shape\n assert db.dtype == float\n cost = np.squeeze(cost)\n assert cost.shape == ()\n grads = {'dw': dw, 'db': db}\n return grads, cost\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter + 1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f' % (i, cost))\n return w, b, costs\n\n def fit(self, X, Y, print_cost=False):\n print('Fit starting:')\n w, b = self.initialize_with_zeros(X.shape[1])\n iter_time = 0\n self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.\n max_iter, self.learning_rate, print_cost)\n print('Fit complished!')\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) 
+ self.b)\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n\n def score(self, X_test, Y_test):\n Y_pre = self.predict(X_test)\n score = np.sum(Y_pre == Y_test) / len(Y_pre)\n return score\n\n def __str__(self):\n return ('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n\nif __name__ == '__main__':\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n data = datasets.load_iris()\n x = data.data[:100, [0, 1]]\n y = np.array([(1 if i > 0 else 0) for i in data.target[:100]])\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n model = LogisticRegression()\n model.fit(x_train, y_train.reshape(len(y_train), -1), True)\n print('Train Score:{}'.format(model.score(x_train, y_train)))\n print('Test Score:{}'.format(model.score(x_test, y_test)))\n plt.subplot(211)\n x_samples = np.linspace(4, 7, 500)\n y_samples = (-model.b - model.w[0] * x_samples) / model.w[1]\n plt.plot(x_samples, y_samples, 'r')\n plt.scatter(x[:50, 0], x[:50, 1], label='negative')\n plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')\n plt.xlabel('petal length')\n plt.ylabel('petal width')\n plt.title('LosRes Results on iris datasets')\n plt.legend()\n plt.subplots_adjust(hspace=0.5, wspace=0.25)\n plt.subplot(212)\n plt.plot(range(len(model.costs)), model.costs, '-o')\n plt.xlabel('steps')\n plt.ylabel('loss')\n plt.title('loss function')\n plt.show()\n",
"step-4": "import numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\nclass LogisticRegression:\n \"\"\"LogisticRegression for binary classification\n \n max_iter: the maximum iteration times for training\n learning_rate: learing rate for gradiend decsend training\n\n Input's shape should be [sample_nums, data_dims]\n\n attrs:\n max_iter\n learning_rate\n (after fit)\n w\n b\n costs\n\n methods:\n fit\n predict\n predict_proba\n score \n \"\"\"\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n assert w.shape == (dim, 1)\n assert isinstance(b, float) or isinstance(b, int)\n return w, b\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y)\n assert dw.shape == w.shape\n assert db.dtype == float\n cost = np.squeeze(cost)\n assert cost.shape == ()\n grads = {'dw': dw, 'db': db}\n return grads, cost\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter + 1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f' % (i, cost))\n return w, b, costs\n\n def fit(self, X, Y, print_cost=False):\n print('Fit starting:')\n w, b = self.initialize_with_zeros(X.shape[1])\n iter_time = 0\n self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.\n max_iter, self.learning_rate, print_cost)\n print('Fit complished!')\n\n def predict_proba(self, 
X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n\n def score(self, X_test, Y_test):\n Y_pre = self.predict(X_test)\n score = np.sum(Y_pre == Y_test) / len(Y_pre)\n return score\n\n def __str__(self):\n return ('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n\nif __name__ == '__main__':\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n data = datasets.load_iris()\n x = data.data[:100, [0, 1]]\n y = np.array([(1 if i > 0 else 0) for i in data.target[:100]])\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n model = LogisticRegression()\n model.fit(x_train, y_train.reshape(len(y_train), -1), True)\n print('Train Score:{}'.format(model.score(x_train, y_train)))\n print('Test Score:{}'.format(model.score(x_test, y_test)))\n plt.subplot(211)\n x_samples = np.linspace(4, 7, 500)\n y_samples = (-model.b - model.w[0] * x_samples) / model.w[1]\n plt.plot(x_samples, y_samples, 'r')\n plt.scatter(x[:50, 0], x[:50, 1], label='negative')\n plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')\n plt.xlabel('petal length')\n plt.ylabel('petal width')\n plt.title('LosRes Results on iris datasets')\n plt.legend()\n plt.subplots_adjust(hspace=0.5, wspace=0.25)\n plt.subplot(212)\n plt.plot(range(len(model.costs)), model.costs, '-o')\n plt.xlabel('steps')\n plt.ylabel('loss')\n plt.title('loss function')\n plt.show()\n",
"step-5": "import numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\n\nclass LogisticRegression:\n '''LogisticRegression for binary classification\n \n max_iter: the maximum iteration times for training\n learning_rate: learing rate for gradiend decsend training\n\n Input's shape should be [sample_nums, data_dims]\n\n attrs:\n max_iter\n learning_rate\n (after fit)\n w\n b\n costs\n\n methods:\n fit\n predict\n predict_proba\n score \n '''\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'.format(\n self.learning_rate, self.max_iter))\n\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n\n assert (w.shape == (dim, 1))\n assert (isinstance(b, float) or isinstance(b, int))\n\n return w, b \n\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y) \n\n assert (dw.shape == w.shape)\n assert (db.dtype == float)\n cost = np.squeeze(cost)\n assert (cost.shape == ())\n grads = {'dw': dw,\n 'db': db}\n\n return grads, cost\n\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter+1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f'%(i, cost))\n return w, b, costs\n\n\n def fit(self, X, Y, print_cost=False):\n print('Fit starting:')\n w, b = self.initialize_with_zeros(X.shape[1])\n iter_time = 0\n\n self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.max_iter, self.learning_rate, print_cost)\n print('Fit 
complished!')\n\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n\n\n def score(self, X_test, Y_test):\n Y_pre = self.predict(X_test)\n score = np.sum(Y_pre == Y_test) / len(Y_pre)\n return score\n\n\n def __str__(self):\n return 'LogisticRegression Model(learning_rate={}, max_iteration={})'.format(\n self.learning_rate, self.max_iter)\n\n\nif __name__ == '__main__':\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n data = datasets.load_iris()\n x = data.data[:100, [0,1]]\n # x = np.hstack([np.ones((100, 1)), x])\n y = np.array([1 if i > 0 else 0 for i in data.target[:100]])\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n\n model = LogisticRegression()\n model.fit(x_train, y_train.reshape(len(y_train), -1), True)\n print('Train Score:{}'.format(model.score(x_train, y_train)))\n print('Test Score:{}'.format(model.score(x_test, y_test)))\n\n plt.subplot(211)\n x_samples = np.linspace(4, 7, 500)\n y_samples = (- model.b - model.w[0]*x_samples) / model.w[1]\n plt.plot(x_samples, y_samples, 'r')\n plt.scatter(x[:50, 0], x[:50, 1], label='negative')\n plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')\n plt.xlabel('petal length')\n plt.ylabel('petal width')\n plt.title('LosRes Results on iris datasets')\n plt.legend()\n \n plt.subplots_adjust(hspace=0.5,wspace=0.25)\n plt.subplot(212)\n plt.plot(range(len(model.costs)), model.costs, '-o')\n plt.xlabel('steps')\n plt.ylabel('loss')\n plt.title('loss function')\n plt.show()",
"step-ids": [
7,
11,
13,
14,
15
]
}
|
[
7,
11,
13,
14,
15
] |
from Stack import Stack
from Regex import Regex
from Symbol import Symbol
class Postfix:
    """Convert an infix regular expression to postfix notation.

    The input regex (taken from `regex.expression`) is first rewritten
    with explicit concatenation operators ('.') by modRegex(), then
    converted to postfix with the shunting-yard algorithm using the
    project's Stack and Symbol helpers.
    """

    def __init__(self, regex):
        self.__regex = regex.expression
        self.__modr = Postfix.modRegex(self.__regex)
        self.__pila = Stack()
        self.__postfix = self.convertInfixToPostfix()

    def getRegex(self):
        """Original infix expression."""
        return self.__regex

    def getExtendedRegex(self):
        # NOTE(review): self.__extended is never assigned anywhere in this
        # class, so calling this raises AttributeError -- confirm whether
        # an "extended regex" step was meant to populate it.
        return self.__extended

    def getModifiedRegex(self):
        """Infix expression with explicit '.' concatenation operators."""
        return self.__modr

    def getPostfix(self):
        """Postfix form of the expression."""
        return self.__postfix

    @staticmethod
    def isConcat(character, nextCharacter):
        """True when an implicit concatenation occurs between the two
        adjacent characters of the infix expression."""
        if Symbol.isOperand(character) and Symbol.isOperand(nextCharacter):
            return True
        if Symbol.isRightParenthesis(character) and Symbol.isLeftParenthesis(nextCharacter):
            return True
        if Symbol.isStar(character) and Symbol.isOperand(nextCharacter):
            return True
        if Symbol.isStar(character) and Symbol.isLeftParenthesis(nextCharacter):
            return True
        if Symbol.isOperand(character) and Symbol.isLeftParenthesis(nextCharacter):
            return True
        if Symbol.isRightParenthesis(character) and nextCharacter == "#":
            return True
        if Symbol.isRightParenthesis(character) and Symbol.isOperand(nextCharacter):
            return True
        return False

    @staticmethod
    def modRegex(reg):
        """Insert explicit '.' concatenation operators into `reg`.

        '$' is appended as a temporary end sentinel and is never copied
        to the output (the loop stops one character short).
        FIX: collapsed the original's duplicate elif/else branches (both
        appended the current character) and stopped shadowing the
        builtin name `list`.
        """
        chars = [char for char in reg + '$']
        out = []
        for i in range(len(chars) - 1):
            out.append(chars[i])
            if Postfix.isConcat(chars[i], chars[i + 1]) and chars[i + 1] != '$':
                out.append('.')
        return "".join(out)

    def convertInfixToPostfix(self):
        """Shunting-yard conversion of the modified regex to postfix."""
        self.__pila.push('(')
        tempr = self.__modr + ')'
        auxpost = ""
        for ch in tempr:
            if Symbol.isOperand(ch):
                auxpost += ch
            elif Symbol.isLeftParenthesis(ch):
                self.__pila.push(ch)
            elif Symbol.isOperator(ch):
                # Pop stacked operators of greater or equal precedence first.
                while (not self.__pila.isEmpty()
                       and Symbol.isOperator(self.__pila.peek())
                       and Symbol.checkPrecedence(self.__pila.peek())
                       >= Symbol.checkPrecedence(ch)):
                    auxpost += self.__pila.pop()
                self.__pila.push(ch)
            elif Symbol.isRightParenthesis(ch):
                while (not self.__pila.isEmpty()
                       and not Symbol.isLeftParenthesis(self.__pila.peek())):
                    auxpost += self.__pila.pop()
                # Discard the matching '('.
                self.__pila.pop()
        return auxpost
|
normal
|
{
"blob_id": "acc39044fa1ae444dd4a737ea37a0baa60a2c7bd",
"index": 4040,
"step-1": "<mask token>\n\n\nclass Postfix:\n\n def __init__(self, regex):\n self.__regex = regex.expression\n self.__modr = Postfix.modRegex(self.__regex)\n self.__pila = Stack()\n self.__postfix = self.convertInfixToPostfix()\n <mask token>\n <mask token>\n <mask token>\n\n def getPostfix(self):\n return self.__postfix\n\n @staticmethod\n def isConcat(character, nextCharacter):\n if Symbol.isOperand(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isOperand(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and nextCharacter == '#':\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isOperand(\n nextCharacter):\n return True\n else:\n return False\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Postfix:\n\n def __init__(self, regex):\n self.__regex = regex.expression\n self.__modr = Postfix.modRegex(self.__regex)\n self.__pila = Stack()\n self.__postfix = self.convertInfixToPostfix()\n\n def getRegex(self):\n return self.__regex\n <mask token>\n <mask token>\n\n def getPostfix(self):\n return self.__postfix\n\n @staticmethod\n def isConcat(character, nextCharacter):\n if Symbol.isOperand(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isOperand(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and nextCharacter == '#':\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isOperand(\n nextCharacter):\n return True\n else:\n return False\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Postfix:\n\n def __init__(self, regex):\n self.__regex = regex.expression\n self.__modr = Postfix.modRegex(self.__regex)\n self.__pila = Stack()\n self.__postfix = self.convertInfixToPostfix()\n\n def getRegex(self):\n return self.__regex\n\n def getExtendedRegex(self):\n return self.__extended\n\n def getModifiedRegex(self):\n return self.__modr\n\n def getPostfix(self):\n return self.__postfix\n\n @staticmethod\n def isConcat(character, nextCharacter):\n if Symbol.isOperand(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isOperand(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and nextCharacter == '#':\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isOperand(\n nextCharacter):\n return True\n else:\n return False\n <mask token>\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Postfix:\n\n def __init__(self, regex):\n self.__regex = regex.expression\n self.__modr = Postfix.modRegex(self.__regex)\n self.__pila = Stack()\n self.__postfix = self.convertInfixToPostfix()\n\n def getRegex(self):\n return self.__regex\n\n def getExtendedRegex(self):\n return self.__extended\n\n def getModifiedRegex(self):\n return self.__modr\n\n def getPostfix(self):\n return self.__postfix\n\n @staticmethod\n def isConcat(character, nextCharacter):\n if Symbol.isOperand(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isOperand(character) and Symbol.isLeftParenthesis(\n nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and nextCharacter == '#':\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isOperand(\n nextCharacter):\n return True\n else:\n return False\n\n @staticmethod\n def modRegex(reg):\n list = [char for char in reg + '$']\n nlist = []\n for i in range(len(list) - 1):\n if Postfix.isConcat(list[i], list[i + 1]) and list[i + 1] != '$':\n nlist.append(list[i])\n nlist.append('.')\n elif list[i] != list[-1] and list[i + 1] != '$':\n nlist.append(list[i])\n else:\n nlist.append(list[i])\n return ''.join(nlist)\n\n def convertInfixToPostfix(self):\n self.__pila.push('(')\n tempr = self.__modr + ')'\n auxpost = ''\n for i in range(len(tempr)):\n if Symbol.isOperand(tempr[i]):\n auxpost += tempr[i]\n elif Symbol.isLeftParenthesis(tempr[i]):\n self.__pila.push(tempr[i])\n elif Symbol.isOperator(tempr[i]):\n while not self.__pila.isEmpty() and Symbol.isOperator(self.\n __pila.peek()) and Symbol.checkPrecedence(self.__pila.\n peek()) >= Symbol.checkPrecedence(tempr[i]):\n 
auxpost += self.__pila.pop()\n self.__pila.push(tempr[i])\n elif Symbol.isRightParenthesis(tempr[i]):\n while not self.__pila.isEmpty(\n ) and not Symbol.isLeftParenthesis(self.__pila.peek()):\n auxpost += self.__pila.pop()\n self.__pila.pop()\n return auxpost\n",
"step-5": "from Stack import Stack\nfrom Regex import Regex\nfrom Symbol import Symbol\n\nclass Postfix:\n def __init__(self, regex):\n self.__regex = regex.expression\n self.__modr = Postfix.modRegex(self.__regex)\n self.__pila = Stack()\n self.__postfix = self.convertInfixToPostfix()\n \n def getRegex(self):\n return self.__regex\n \n def getExtendedRegex(self):\n return self.__extended\n\n def getModifiedRegex(self):\n return self.__modr\n\n def getPostfix(self):\n return self.__postfix\n\n @staticmethod\n def isConcat(character, nextCharacter):\n if Symbol.isOperand(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isLeftParenthesis(nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isOperand(nextCharacter):\n return True\n elif Symbol.isStar(character) and Symbol.isLeftParenthesis(nextCharacter):\n return True\n elif Symbol.isOperand(character) and Symbol.isLeftParenthesis(nextCharacter):\n return True\n elif Symbol.isRightParenthesis(character) and nextCharacter == \"#\":\n return True\n elif Symbol.isRightParenthesis(character) and Symbol.isOperand(nextCharacter):\n return True\n else:\n return False\n\n @staticmethod\n def modRegex(reg):\n list = [char for char in reg+'$']\n nlist = []\n for i in range(len(list)-1):\n if Postfix.isConcat(list[i], list[i+1]) and list[i+1] != '$':\n nlist.append(list[i])\n nlist.append('.')\n elif(list[i] != list[-1] and list[i+1] != '$'):\n nlist.append(list[i])\n else:\n nlist.append(list[i])\n return \"\".join(nlist)\n\n def convertInfixToPostfix(self):\n self.__pila.push('(')\n tempr = self.__modr+')'\n auxpost = \"\"\n for i in range(len(tempr)):\n if Symbol.isOperand(tempr[i]):\n auxpost += tempr[i]\n elif Symbol.isLeftParenthesis(tempr[i]):\n self.__pila.push(tempr[i])\n elif Symbol.isOperator(tempr[i]):\n while not self.__pila.isEmpty() and Symbol.isOperator(self.__pila.peek()) and (Symbol.checkPrecedence(self.__pila.peek()) >= 
Symbol.checkPrecedence(tempr[i])):\n auxpost += self.__pila.pop()\n self.__pila.push(tempr[i])\n elif Symbol.isRightParenthesis(tempr[i]):\n while not self.__pila.isEmpty() and not Symbol.isLeftParenthesis(self.__pila.peek()):\n auxpost += self.__pila.pop()\n self.__pila.pop()\n return auxpost",
"step-ids": [
4,
5,
7,
9,
11
]
}
|
[
4,
5,
7,
9,
11
] |
'''
Created on 14 november 2015
@author: federico
'''
import paho.mqtt.client as mosquitto
import json
import urllib,urllib2
import datetime
import threading
import time
from pygame import mixer
from datetime import timedelta
#ALARM SOUND PATH
alarm_path="/home/pi/SmartBed/Smart_Bed/src/Rooster.wav"
#DWEET&FREEBOARD
# dweet.io endpoints used as a lightweight message bus for the freeboard dashboard.
thing_name='smart_bed_status'
url_freeboard="https://dweet.io:443/dweet/for/smart_bed_values"
url_status="https://dweet.io:443/get/latest/dweet/for/smart_bed_status"
url_freeboard_qos="https://dweet.io:443/dweet/for/smart_bed_qos"
url_freeboard_sleep_time="https://dweet.io:443/dweet/for/smart_bed_sleep_time"
#THINGSPEAK
# ThingSpeak channel used for long-term logging (API keys here are placeholders).
url_thingspeak="https://api.thingspeak.com/update"
channel_id="68285"
api_read="XXXXXXXXXXXXXXX"
api_write="ZZZZZZZZZZZZZZZ"
#CONSTANT
soglia=10  # movement threshold ("soglia" = threshold): readings at or below it count as 0
broker_ip="127.0.0.1"  # local MQTT broker address
smart_alarm_threshold=10 #threshold for the smart alarm:how much movement is needed to ring
sensor_freq=2 #seconds
sensor_MAXVAL=255 #g
#LOCK VARIABLE:this variable is needed to avoid that 2 threads change the status variable
alarm_clock_lock=0
#queue
q=[]  # shared sample queue: filled by on_message/queue_insert, drained by process_data
nsamples=10  # batch size: samples accumulated before one ThingSpeak upload
#status of the system
status=0  # 1 while monitoring is active; polled by all worker threads
mov_tot=0.1  # running movement total for the night; process_data resets it to 0.1
alarm_sensibility=5 #seconds
def on_connect(client,userdata,rc):
    # MQTT connect callback: log the result code and subscribe to the
    # accelerometer topic at QoS 0.
    print ("connected with result code"+str(rc))
    client.subscribe("smart_bed/values", 0)
def on_message(client,userdata,msg):
    # MQTT message callback (Python 2 print syntax): decode the JSON payload
    # from the sensor node and push it into the shared queue.
    print "Raspberry receive data Topic:",msg.topic+'\nMessage:'+str(msg.payload)
    jsonfile=json.loads(msg.payload)
    queue_insert(jsonfile)
def queue_insert(jsonfile):
    """Extract the x/y/z acceleration from one JSON sample, append the
    thresholded movement value to the shared queue ``q``, and mirror the raw
    reading to the freeboard live view via dweet.io.

    ``jsonfile`` is expected to be a dict shaped like
    {"e": {"v": {"x": ..., "y": ..., "z": ...}}} (SenML-like payload).
    """
    x=int(jsonfile["e"]["v"]["x"])
    y=int(jsonfile["e"]["v"]["y"])
    z=int(jsonfile["e"]["v"]["z"])
    valore=transform_function(x, y, z)
    # Only movement above the noise threshold ("soglia") counts; everything
    # else is stored as 0 so the batch average only reflects real motion.
    if(valore>soglia):
        q.append(valore-soglia)
        print "Value appended in the queue"+str(valore-soglia)
    else:
        q.append(0)
        print "0 appended"
    #SENDING DATA TO FREEBOARD LIVE VIEW
    values={}
    values["x"]=x
    values["y"]=y
    values["z"]=z
    data = urllib.urlencode(values)
    req = urllib2.Request(url_freeboard, data)
    urllib2.urlopen(req)
def send_data_c(coda):
    """Average the non-zero movement samples in ``coda``, add the average to
    the global ``mov_tot`` accumulator, and upload it to ThingSpeak (field1).

    Runs in a worker thread spawned by process_data for each full batch.
    """
    global mov_tot
    somma=0
    conta=0
    valore=0
    # Average only the samples that recorded actual movement (non-zero);
    # if every sample was 0 the batch value stays 0.
    for l in coda:
        if l!=0:
            somma=somma+l
            conta=conta+1
    if somma!=0:
        valore=float(somma)/conta
    mov_tot=mov_tot+valore
    print "I'm ready to send"+ str(valore)+" to thingspeak"
    #sending data to thingspeak movement
    params = urllib.urlencode({'api_key': api_write, 'field1': "%.2f" % valore})
    req=urllib2.Request(url_thingspeak,params)
    urllib2.urlopen(req)
def transform_function(x,y,z):
    """Collapse the three acceleration axes into one scalar movement value.

    Each axis is multiplied by a per-axis weight (all 1 by default); raise a
    weight to emphasise movement along that direction.
    """
    # Per-axis weights — tune these for special interest in one direction.
    weight_x, weight_y, weight_z = 1, 1, 1
    return weight_x * x + weight_y * y + weight_z * z
def process_data(ore,minu,init_time):
    """Main processing loop for one sleep session.

    While the system is ON, ship each full batch of ``nsamples`` queued
    movement values to ThingSpeak in a worker thread. When the system is
    switched OFF, flush the remaining samples, compute the sleep time and a
    quality-of-sleep score, publish both to freeboard and ThingSpeak, then
    reset ``mov_tot`` for the next night.

    ore/minu: alarm hour/minute (unused here; kept for the thread signature).
    init_time: datetime when monitoring started, used for sleep duration.
    """
    global mov_tot
    while(status==1):
        if len(q)==nsamples:
            # Copy the batch so the worker thread owns its own list,
            # then clear the shared queue in place.
            coda=q[:]
            tr=threading.Thread(target=send_data_c,args=(coda,))
            tr.start()
            del q[:]
    #LAST DATA IN THE QUEUE
    if len(q)!=0:
        coda=q
        tr=threading.Thread(target=send_data_c,args=(coda,))
        tr.start()
        del q[:]
    #LAST STATISTICS
    i=datetime.datetime.now()
    #sleep time in minutes
    b=i-init_time
    sleep_time=b.seconds/60
    print "Passed seconds from the start"+str(b.seconds)
    print "Total movement"+str(mov_tot)
    #MYFUNCTION TO QUALITY OF SLEEP
    # Ad-hoc score: 100 minus total movement scaled by sampling rate and
    # session length; more movement per unit time lowers the score.
    qos=-((100*sensor_freq*nsamples*15/(sensor_MAXVAL*3*b.seconds)))*mov_tot+100
    #LAST DATA TO FREEBOARD
    data = urllib.urlencode({'qos': "%.0f" %qos})
    req = urllib2.Request(url_freeboard_qos, data)
    urllib2.urlopen(req)
    data = urllib.urlencode({'sleep_time':sleep_time})
    req = urllib2.Request(url_freeboard_sleep_time, data)
    urllib2.urlopen(req)
    #LAST DATA TO THINGSPEAK. WHILE CYCLE IS NEEDED BECAUSE DATA ON THINGSPEAK CAN BE UPDATED EACH 15s
    # ThingSpeak replies '0' when an update is rejected (rate limit), so
    # retry with a growing back-off until it accepts the value.
    resp='0'
    times=0
    while resp=='0':
        time.sleep(times)
        params = urllib.urlencode({'api_key': api_write, 'field2': "%.1f" % sleep_time})
        req=urllib2.Request(url_thingspeak,params)
        risp=urllib2.urlopen(req)
        resp=risp.read()
        times=times+5
    resp='0'
    times=0
    while(resp=='0'):
        time.sleep(times)
        params = urllib.urlencode({'api_key': api_write, 'field3': "%.1f" % qos})
        req=urllib2.Request(url_thingspeak,params)
        risp=urllib2.urlopen(req)
        resp=risp.read()
        times=times+5
    #needed for next measurement
    mov_tot=0.1
def alarmclock(h,m):
    """Basic alarm clock thread: ring when the wall clock reaches h:m.

    ``alarm_clock_lock`` keeps this thread and smart_alarm from sounding the
    alarm at the same time.
    """
    global alarm_clock_lock
    while(status==1):
        i=datetime.datetime.now()
        if (i.hour==h) & (i.minute==m):
            if alarm_clock_lock==0:
                #LOCK
                alarm_clock_lock=1
                print "ALARM FROM BASIC ALARMCLOCK"
                # Blocks here until status flips to 0 (sound_clock loops on it).
                sound_clock()
                #UNLOCK
                alarm_clock_lock=0
        #alarm sensibility
        time.sleep(alarm_sensibility)
def sound_clock():
    """Play the alarm sound in a loop until the system is switched OFF
    (``status`` != 1). Uses pygame's mixer with the configured WAV file."""
    mixer.init()
    mixer.music.load(alarm_path)
    while(status==1):
        mixer.music.play()
        time.sleep(4)
def smart_alarm(a_h,a_m,ore,minu,smart_min):
    """Smart alarm thread: sleep until ``smart_min`` minutes before the set
    alarm, then ring as soon as enough movement is detected (the user is in
    light sleep), instead of waiting for the exact alarm time.

    a_h/a_m: current hour/minute when the thread starts.
    ore/minu: configured alarm hour/minute.
    smart_min: size of the smart window, in minutes before the alarm.
    """
    #bad thing but signals cannot be managed as a child thread
    time_to_wait=abs(a_h-ore)*3600+abs(a_m-abs((minu-smart_min)%60))*60
    print "second to sleep"+str(time_to_wait)
    time.sleep(time_to_wait)
    global mov_tot
    # Snapshot the movement total; only movement accumulated from now on
    # counts toward triggering the alarm.
    initial_mov=mov_tot
    while(status==1):
        print "mov_tot"+ str(mov_tot)
        print "initial_mov"+str(initial_mov)
        if((mov_tot-initial_mov)>smart_alarm_threshold):
            global alarm_clock_lock
            #LOCK
            if alarm_clock_lock==0:
                alarm_clock_lock=1
                print "ALARM FROM SMART CLOCK"
                sound_clock()
                #UNLOCK
                alarm_clock_lock=0
        time.sleep(5)
if __name__ == '__main__':
    # Connect to the local MQTT broker and start the network loop in the
    # background; incoming samples arrive via on_message.
    client=mosquitto.Mosquitto("Raspberry")
    client.on_connect=on_connect
    client.on_message = on_message
    client.connect(broker_ip, port=1883, keepalive=60, bind_address="")
    client.loop_start()
    # Poll the dweet.io "thing" every 2 s for the desired ON/OFF state and
    # alarm configuration set from the dashboard.
    while(True):
        req=urllib2.Request(url_status)
        resp=urllib2.urlopen(req)
        dweet=resp.read()
        dweet2=json.loads(dweet)
        stat=dweet2["with"][0]["content"]["status"]
        # OFF -> ON transition: spawn the worker threads for this session.
        if (stat==1) & (status==0):
            status=1
            print "System is switched ON"
            ore=dweet2["with"][0]["content"]["alarm_hour"]
            minu=dweet2["with"][0]["content"]["alarm_min"]
            smart_min=dweet2["with"][0]["content"]["smart_alarm"]
            init_time=datetime.datetime.now()
            actual_hour=init_time.hour
            actual_min=init_time.minute
            t=threading.Thread(target=process_data,args=(actual_hour,actual_min,init_time))
            t.daemon=True
            t.start()
            l=threading.Thread(target=alarmclock,args=(ore,minu,))
            l.daemon=True
            l.start()
            # smart_min == 0 means the smart-alarm feature is disabled.
            if(smart_min!=0):
                h=threading.Thread(target=smart_alarm,args=(actual_hour,actual_min,ore,minu,smart_min,))
                h.daemon=True
                h.start()
            diz={}
            diz["status"]=1
            val=client.publish("smart_bed",json.dumps(diz) , qos=1)
        # ON -> OFF transition: publish the new state; the worker threads
        # notice status==0 and wind down on their own.
        elif (stat==0) & (status==1):
            diz={}
            diz["status"]=0
            val=client.publish("smart_bed",json.dumps(diz) , qos=1)
            status=0
            print "System is switched OFF"
        time.sleep(2)
    client.loop_stop()
|
normal
|
{
"blob_id": "05bd95966d72dd40b9b828932b0bf70e40ddb573",
"index": 1511,
"step-1": "'''\nCreated on 14 november 2015\n\n@author: federico\n'''\n\nimport paho.mqtt.client as mosquitto\nimport json\nimport urllib,urllib2\nimport datetime\nimport threading\nimport time\nfrom pygame import mixer\nfrom datetime import timedelta\n\n\n#ALARM SOUND PATH\nalarm_path=\"/home/pi/SmartBed/Smart_Bed/src/Rooster.wav\"\n\n#DWEET&FREEBOARD \nthing_name='smart_bed_status'\nurl_freeboard=\"https://dweet.io:443/dweet/for/smart_bed_values\"\nurl_status=\"https://dweet.io:443/get/latest/dweet/for/smart_bed_status\"\nurl_freeboard_qos=\"https://dweet.io:443/dweet/for/smart_bed_qos\"\nurl_freeboard_sleep_time=\"https://dweet.io:443/dweet/for/smart_bed_sleep_time\"\n\n#THINGSPEAK\nurl_thingspeak=\"https://api.thingspeak.com/update\"\nchannel_id=\"68285\"\napi_read=\"XXXXXXXXXXXXXXX\"\napi_write=\"ZZZZZZZZZZZZZZZ\"\n\n#CONSTANT\nsoglia=10\nbroker_ip=\"127.0.0.1\"\nsmart_alarm_threshold=10 #threshold for the smart alarm:how much movement is needed to ring\nsensor_freq=2 #seconds\nsensor_MAXVAL=255 #g\n\n#LOCK VARIABLE:this variable is needed to avoid that 2 threads change the status variable\nalarm_clock_lock=0\n\n#queue\nq=[]\nnsamples=10\n\n#status of the system\nstatus=0\nmov_tot=0.1\nalarm_sensibility=5 #seconds\n\n\ndef on_connect(client,userdata,rc):\n print (\"connected with result code\"+str(rc))\n client.subscribe(\"smart_bed/values\", 0)\n \n \ndef on_message(client,userdata,msg):\n print \"Raspberry receive data Topic:\",msg.topic+'\\nMessage:'+str(msg.payload)\n jsonfile=json.loads(msg.payload)\n \n queue_insert(jsonfile)\n\ndef queue_insert(jsonfile):\n \n x=int(jsonfile[\"e\"][\"v\"][\"x\"])\n y=int(jsonfile[\"e\"][\"v\"][\"y\"])\n z=int(jsonfile[\"e\"][\"v\"][\"z\"])\n \n valore=transform_function(x, y, z) \n \n if(valore>soglia):\n q.append(valore-soglia)\n print \"Value appended in the queue\"+str(valore-soglia)\n else:\n q.append(0)\n print \"0 appended\"\n \n #SENDING DATA TO FREEBOARD LIVE VIEW\n values={}\n values[\"x\"]=x\n 
values[\"y\"]=y\n values[\"z\"]=z\n data = urllib.urlencode(values)\n req = urllib2.Request(url_freeboard, data)\n urllib2.urlopen(req)\n\n \ndef send_data_c(coda): \n global mov_tot\n somma=0\n conta=0\n valore=0\n for l in coda:\n if l!=0:\n somma=somma+l \n conta=conta+1\n \n if somma!=0: \n valore=float(somma)/conta\n mov_tot=mov_tot+valore\n print \"I'm ready to send\"+ str(valore)+\" to thingspeak\"\n \n #sending data to thingspeak movement\n params = urllib.urlencode({'api_key': api_write, 'field1': \"%.2f\" % valore})\n req=urllib2.Request(url_thingspeak,params)\n urllib2.urlopen(req)\n\n \ndef transform_function(x,y,z):\n\n #PARAMETERS TO SET IN CASE OF SPECIAL INTEREST IN ONE DIRECTION\n a=1\n b=1\n c=1\n valore=a*x+b*y+z*c\n return valore\n \ndef process_data(ore,minu,init_time):\n \n global mov_tot\n \n while(status==1):\n if len(q)==nsamples:\n coda=q[:]\n \n tr=threading.Thread(target=send_data_c,args=(coda,))\n tr.start()\n del q[:]\n\n #LAST DATA IN THE QUEUE\n \n if len(q)!=0:\n coda=q\n tr=threading.Thread(target=send_data_c,args=(coda,))\n tr.start()\n del q[:]\n \n #LAST STATISTICS\n\n i=datetime.datetime.now() \n \n #sleep time in minutes\n b=i-init_time\n sleep_time=b.seconds/60\n \n print \"Passed seconds from the start\"+str(b.seconds)\n print \"Total movement\"+str(mov_tot) \n \n #MYFUNCTION TO QUALITY OF SLEEP\n qos=-((100*sensor_freq*nsamples*15/(sensor_MAXVAL*3*b.seconds)))*mov_tot+100\n \n #LAST DATA TO FREEBOARD\n \n data = urllib.urlencode({'qos': \"%.0f\" %qos})\n req = urllib2.Request(url_freeboard_qos, data)\n urllib2.urlopen(req)\n\n data = urllib.urlencode({'sleep_time':sleep_time})\n req = urllib2.Request(url_freeboard_sleep_time, data)\n urllib2.urlopen(req)\n\n #LAST DATA TO THINGSPEAK. 
WHILE CYCLE IS NEEDED BECAUSE DATA ON THINGSPEAK CAN BE UPDATED EACH 15s\n resp='0'\n times=0\n while resp=='0':\n time.sleep(times)\n \tparams = urllib.urlencode({'api_key': api_write, 'field2': \"%.1f\" % sleep_time})\n \treq=urllib2.Request(url_thingspeak,params)\n \trisp=urllib2.urlopen(req)\n \tresp=risp.read()\n \ttimes=times+5\n\n resp='0'\n times=0\n while(resp=='0'):\n\ttime.sleep(times)\n\tparams = urllib.urlencode({'api_key': api_write, 'field3': \"%.1f\" % qos})\n \treq=urllib2.Request(url_thingspeak,params)\n \trisp=urllib2.urlopen(req)\n \tresp=risp.read()\n\ttimes=times+5\n\n \n \n #needed for next measurement\n mov_tot=0.1\n \ndef alarmclock(h,m):\n global alarm_clock_lock\n \n while(status==1):\n i=datetime.datetime.now() \n if (i.hour==h) & (i.minute==m):\n if alarm_clock_lock==0:\n #LOCK\n alarm_clock_lock=1\n print \"ALARM FROM BASIC ALARMCLOCK\"\n sound_clock()\n #UNLOCK\n alarm_clock_lock=0\n \n #alarm sensibility \n time.sleep(alarm_sensibility) \n \ndef sound_clock():\n \n mixer.init()\n mixer.music.load(alarm_path)\n while(status==1):\n mixer.music.play()\n time.sleep(4)\n \ndef smart_alarm(a_h,a_m,ore,minu,smart_min):\n #bad thing but signals cannot be managed as a child thread \n time_to_wait=abs(a_h-ore)*3600+abs(a_m-abs((minu-smart_min)%60))*60\n print \"second to sleep\"+str(time_to_wait)\n time.sleep(time_to_wait)\n \n global mov_tot\n initial_mov=mov_tot\n \n while(status==1):\n print \"mov_tot\"+ str(mov_tot)\n print \"initial_mov\"+str(initial_mov)\n if((mov_tot-initial_mov)>smart_alarm_threshold): \n global alarm_clock_lock\n #LOCK\n if alarm_clock_lock==0:\n alarm_clock_lock=1\n print \"ALARM FROM SMART CLOCK\"\n sound_clock()\n #UNLOCK\n alarm_clock_lock=0\n \n time.sleep(5)\n \n \n \nif __name__ == '__main__':\n \n client=mosquitto.Mosquitto(\"Raspberry\")\n client.on_connect=on_connect\n client.on_message = on_message\n \n client.connect(broker_ip, port=1883, keepalive=60, bind_address=\"\") \n client.loop_start() \n \n 
while(True):\n \n req=urllib2.Request(url_status)\n resp=urllib2.urlopen(req)\n dweet=resp.read()\n dweet2=json.loads(dweet)\n stat=dweet2[\"with\"][0][\"content\"][\"status\"]\n \n if (stat==1) & (status==0):\n \n status=1\n print \"System is switched ON\"\n ore=dweet2[\"with\"][0][\"content\"][\"alarm_hour\"]\n minu=dweet2[\"with\"][0][\"content\"][\"alarm_min\"]\n smart_min=dweet2[\"with\"][0][\"content\"][\"smart_alarm\"]\n \n \n\t init_time=datetime.datetime.now() \n actual_hour=init_time.hour\n actual_min=init_time.minute\n \n t=threading.Thread(target=process_data,args=(actual_hour,actual_min,init_time))\n t.daemon=True\n t.start()\n \n l=threading.Thread(target=alarmclock,args=(ore,minu,))\n l.daemon=True\n l.start()\n \n if(smart_min!=0):\n h=threading.Thread(target=smart_alarm,args=(actual_hour,actual_min,ore,minu,smart_min,))\n h.daemon=True\n h.start()\n \n diz={}\n diz[\"status\"]=1\n val=client.publish(\"smart_bed\",json.dumps(diz) , qos=1)\n \n elif (stat==0) & (status==1):\n \n \n diz={}\n diz[\"status\"]=0\n val=client.publish(\"smart_bed\",json.dumps(diz) , qos=1)\n status=0\n \n\n print \"System is switched OFF\"\n \n time.sleep(2)\n \n client.loop_stop()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Module-level build settings, mutated by claprocessor() from sys.argv.
flags =[]
sourcefiles:str = []  # NOTE(review): annotated `str` but actually a list of .c paths — confirm
headerfiles:str = []  # NOTE(review): annotated `str` but actually a list of .h paths — confirm
mainfile:str = ""
outfilename = "a.out"  # default output binary name (overridden by -o)
assemblyfilename = "a.asm"  # default assembly output name (overridden by -ASM/-asm)
includedfilenames = []
class Variables:
    # Placeholder record for a tracked variable (not used elsewhere in view).
    size:bytes  # NOTE(review): `bytes` as a size annotation looks odd — `int` was probably meant; confirm
    name:str  # variable identifier
class cstruct:
    # Placeholder record for a parsed C struct.
    structname:string  # NOTE(review): `string` is not defined (no such builtin) — `str` was probably meant
def claprocessor():
    """Parse command-line arguments into the module-level build settings.

    Recognised arguments:
      -o <name>          output binary name (sets ``outfilename``)
      -ASM/-asm <name>   assembly output name (sets ``assemblyfilename``)
      *.c                appended to ``sourcefiles``
      *.h                appended to ``headerfiles``
    """
    import sys  # local import: the module does not import sys at top level
    # Without `global`, the original plain assignments created throwaway
    # locals and the module-level defaults were never updated.
    global outfilename, assemblyfilename
    print(sys.argv)
    for i, stri in enumerate(sys.argv):
        if stri == "-o":
            # sys.argv is a list: index it; calling it (sys.argv(i+1)) raised
            # TypeError in the original.
            outfilename = sys.argv[i + 1]
        if stri == "-ASM" or stri == "-asm":
            assemblyfilename = sys.argv[i + 1]
        if ".c" in stri:
            sourcefiles.append(stri)
        if ".h" in stri:
            headerfiles.append(stri)
    return
def cpreprosscssor():
    # Preprocessor stage: tokenize the main source file.
    # NOTE(review): `lexer` is not defined in this file as shown, and
    # `mainfile` defaults to "" — presumably this runs after some setup
    # assigns it; the token list is currently discarded. Confirm intent.
    maintokens = lexer(mainfile)
    return
def cprocessor():
    """Compiler stage placeholder — currently a no-op that returns None."""
    return None
if __name__ == '__main__':
    # Entry point: only command-line argument processing is wired up so far.
    claprocessor()
|
normal
|
{
"blob_id": "24187284ff3e03cf79b8545415005c71f9355ddc",
"index": 9062,
"step-1": "<mask token>\n\n\nclass Variables:\n size: bytes\n name: str\n\n\nclass cstruct:\n structname: string\n\n\n<mask token>\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Variables:\n size: bytes\n name: str\n\n\nclass cstruct:\n structname: string\n\n\ndef claprocessor():\n print(sys.argv)\n i = 0\n for stri in sys.argv:\n if stri.__eq__('-o'):\n outfilename = sys.argv(i + 1)\n if stri.__eq__('-ASM') or stri.__eq__('-asm'):\n assemblyfilename = sys.argv(i + 1)\n if stri.__contains__('.c'):\n sourcefiles.append(stri)\n if stri.__contains__('.h'):\n headerfiles.append(stri)\n i += 1\n return\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\nsourcefiles: str = []\nheaderfiles: str = []\nmainfile: str = ''\n<mask token>\n\n\nclass Variables:\n size: bytes\n name: str\n\n\nclass cstruct:\n structname: string\n\n\ndef claprocessor():\n print(sys.argv)\n i = 0\n for stri in sys.argv:\n if stri.__eq__('-o'):\n outfilename = sys.argv(i + 1)\n if stri.__eq__('-ASM') or stri.__eq__('-asm'):\n assemblyfilename = sys.argv(i + 1)\n if stri.__contains__('.c'):\n sourcefiles.append(stri)\n if stri.__contains__('.h'):\n headerfiles.append(stri)\n i += 1\n return\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n return\n\n\nif __name__ == '__main__':\n claprocessor()\n",
"step-4": "flags = []\nsourcefiles: str = []\nheaderfiles: str = []\nmainfile: str = ''\noutfilename = 'a.out'\nassemblyfilename = 'a.asm'\nincludedfilenames = []\n\n\nclass Variables:\n size: bytes\n name: str\n\n\nclass cstruct:\n structname: string\n\n\ndef claprocessor():\n print(sys.argv)\n i = 0\n for stri in sys.argv:\n if stri.__eq__('-o'):\n outfilename = sys.argv(i + 1)\n if stri.__eq__('-ASM') or stri.__eq__('-asm'):\n assemblyfilename = sys.argv(i + 1)\n if stri.__contains__('.c'):\n sourcefiles.append(stri)\n if stri.__contains__('.h'):\n headerfiles.append(stri)\n i += 1\n return\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n return\n\n\nif __name__ == '__main__':\n claprocessor()\n",
"step-5": "flags =[]\nsourcefiles:str = []\nheaderfiles:str = []\nmainfile:str = \"\"\noutfilename = \"a.out\"\nassemblyfilename = \"a.asm\"\nincludedfilenames = []\n\n\nclass Variables:\n size:bytes\n name:str\n\n\nclass cstruct:\n structname:string\n\n\ndef claprocessor():\n print(sys.argv)\n i=0\n for stri in sys.argv:\n if stri.__eq__(\"-o\"):\n outfilename=sys.argv(i+1)\n if stri.__eq__(\"-ASM\") or stri.__eq__(\"-asm\") :\n assemblyfilename = sys.argv(i+1)\n if stri.__contains__(\".c\"):\n sourcefiles.append(stri)\n if stri.__contains__(\".h\"):\n headerfiles.append(stri)\n\n i += 1\n return\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n\n return\n\n\nif __name__ == '__main__':\n claprocessor()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.conf import settings
from .base import *
import os
# Docker/production settings overlay: every secret and host-specific value is
# read from environment variables so the same image runs in any environment.
# Missing variables fail fast at import time (os.environ[...] raises KeyError).
DEBUG = True  # NOTE(review): DEBUG is on in what looks like a production settings module — confirm intended
SECRET_KEY = os.environ['SECRET_KEY']
ROOT_URLCONF = 'floweryroad.urls.docker_production'
ALLOWED_HOSTS = [os.environ['WEB_HOST']]
CORS_ORIGIN_WHITELIST = [os.environ['CORS']]
# PostgreSQL connection, fully environment-driven.
DATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql_psycopg2',
    'NAME': os.environ['DB_NAME'], 'USER': os.environ['DB_USER'],
    'PASSWORD': os.environ['DB_PASSWORD'], 'HOST': os.environ['DB_HOST'],
    'PORT': os.environ['DB_PORT']}}
# BASE_DIR is expected to come from the wildcard import of .base above.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = os.environ['MEDIA']
|
normal
|
{
"blob_id": "ab35684166f07a3ab9e64f2ff98980e25a3fc576",
"index": 1643,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nDEBUG = True\nSECRET_KEY = os.environ['SECRET_KEY']\nROOT_URLCONF = 'floweryroad.urls.docker_production'\nALLOWED_HOSTS = [os.environ['WEB_HOST']]\nCORS_ORIGIN_WHITELIST = [os.environ['CORS']]\nDATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ['DB_NAME'], 'USER': os.environ['DB_USER'],\n 'PASSWORD': os.environ['DB_PASSWORD'], 'HOST': os.environ['DB_HOST'],\n 'PORT': os.environ['DB_PORT']}}\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = os.environ['MEDIA']\n",
"step-3": "from django.conf import settings\nfrom .base import *\nimport os\nDEBUG = True\nSECRET_KEY = os.environ['SECRET_KEY']\nROOT_URLCONF = 'floweryroad.urls.docker_production'\nALLOWED_HOSTS = [os.environ['WEB_HOST']]\nCORS_ORIGIN_WHITELIST = [os.environ['CORS']]\nDATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ['DB_NAME'], 'USER': os.environ['DB_USER'],\n 'PASSWORD': os.environ['DB_PASSWORD'], 'HOST': os.environ['DB_HOST'],\n 'PORT': os.environ['DB_PORT']}}\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = os.environ['MEDIA']\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np

# Load the pickled {word: frequency} dict. allow_pickle=True is required on
# NumPy >= 1.16.3, where loading object arrays raises ValueError by default.
raw = np.load("raw_with_freq.npy", allow_pickle=True).item()
# Drop words longer than 8 characters; iterate over a snapshot of the keys so
# the dict can be mutated safely during the loop.
for word in list(raw.keys()):
    if len(word) > 8:
        del raw[word]
print(raw)
print(len(raw))  # number of surviving entries
np.save("shorten_raw_with_freq.npy", raw)
|
normal
|
{
"blob_id": "ffb17b370c892696b341f6d37a2cfe106a5670a5",
"index": 4265,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n",
"step-3": "<mask token>\nraw = np.load('raw_with_freq.npy').item()\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n",
"step-4": "import numpy as np\nraw = np.load('raw_with_freq.npy').item()\nfor i in list(raw.keys()):\n if len(i) > 8:\n del raw[i]\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save('shorten_raw_with_freq.npy', raw)\n",
"step-5": "import numpy as np\nraw = np.load(\"raw_with_freq.npy\").item()\nfor i in list(raw.keys()):\n\tif len(i) > 8:\n\t\tdel(raw[i])\nprint(raw)\nprint(len(list(raw.keys())))\nnp.save(\"shorten_raw_with_freq.npy\", raw)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Read n and print n*(n-1)//2 (the number of unordered pairs among n items).
g7 = int(input())
# Use pure integer arithmetic: the original computed int(n/2 * (n-1)) via
# float division, which loses precision once n*(n-1) exceeds 2**53.
print(g7 * (g7 - 1) // 2)
|
normal
|
{
"blob_id": "abb08956f55fd1e8af27ce12fa94a4137d7d908e",
"index": 7251,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(int(h7 * i))\n",
"step-3": "g7 = int(input())\nh7 = g7 / 2\ni = g7 - 1\nprint(int(h7 * i))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import functools
import re
import nltk.data
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from nltk.tokenize import RegexpTokenizer
def clean(string):
    """Normalise raw text for summarisation: pad contractions and punctuation
    with spaces, drop ']]' markers and newlines, then remove bracketed spans
    and surrounding whitespace.
    """
    # (pattern, replacement) pairs, applied in this exact order.
    substitutions = [
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r"!", " ! "),
        (r"\\\(", " \( "),
        (r"\\\)", " \) "),
        (r"\?", " \? "),
        (r"\]\]", ""),
        (r"\n", ""),
    ]
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    string = remove_text_inside_brackets(string.rstrip(), "(){}[]")
    return string.strip()
def remove_text_inside_brackets(text, brackets):
    """Return *text* with every bracketed span (and the brackets) removed.

    ``brackets`` is a flat string of open/close pairs, e.g. ``"(){}[]"``.
    Nesting is handled per bracket kind; an unmatched closing bracket is
    treated as ordinary text and kept.
    """
    depth = [0] * (len(brackets) // 2)  # current nesting depth per kind
    kept = []
    for ch in text:
        idx = brackets.find(ch)
        if idx >= 0:
            kind, is_close = divmod(idx, 2)
            depth[kind] += -1 if is_close else 1
            if depth[kind] >= 0:
                continue  # genuine bracket: never emitted
            depth[kind] = 0  # unmatched close: fall through and maybe keep it
        if not any(depth):  # only emit characters outside all brackets
            kept.append(ch)
    return ''.join(kept)
def reorder_sentences(output_sentences, input):
    """Sort *output_sentences* in place by their position in *input* and
    return the list.

    Sorting on the key ``input.find`` is equivalent to the pairwise
    comparator ``input.find(s1) - input.find(s2)``.
    """
    output_sentences.sort(key=input.find)
    return output_sentences
def get_summarized(input, num_sentences):
    """Extractive summary: return up to *num_sentences* sentences from
    *input*, chosen because they contain the most frequent non-stopword
    words, reordered back into document order.

    NOTE: the parameter name `input` shadows the builtin.
    """
    input = clean(input)
    # Tokenize to lowercase words and drop stopwords before counting.
    tokenizer = RegexpTokenizer('\w+')
    base_words = [word.lower() for word in tokenizer.tokenize(input)]
    words = [word for word in base_words if word not in stopwords.words()]
    word_frequencies = FreqDist(words)
    most_frequent_words = [pair[0] for pair in word_frequencies.most_common(100)]
    # "====" as the bracket spec strips text between '=' markers (section
    # headings in wiki-style text).
    input = remove_text_inside_brackets(input, "====")
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    actual_sentences_pre = sent_detector.tokenize(input)
    actual_sentences = []
    # Discard very short sentences (6 words or fewer).
    for sentence in actual_sentences_pre:
        if len(sentence.split()) <= 6:
            continue
        else:
            actual_sentences.append(sentence)
    working_sentences = [sentence.lower() for sentence in actual_sentences]
    output_sentences = []
    # For each frequent word (most frequent first) pick the first sentence
    # containing it that has not been selected yet, until enough sentences
    # are collected.
    for word in most_frequent_words:
        for i in range(0, len(working_sentences)):
            if word in working_sentences[i] and actual_sentences[i] not in output_sentences:
                output_sentences.append(actual_sentences[i])
                break
            if len(output_sentences) >= num_sentences:
                break
        if len(output_sentences) >= num_sentences:
            break
    # NOTE(review): str.capitalize() returns a new string — this loop
    # discards its result and has no effect; confirm intent.
    for sentence in output_sentences:
        sentence.capitalize()
    return reorder_sentences(output_sentences, input)
def summarize(input, num_sentences):
    """Return a *num_sentences*-long summary of *input* as one string."""
    sentences = get_summarized(input, num_sentences)
    return " ".join(sentences)
|
normal
|
{
"blob_id": "837e84d4a58d8fd0d0ffc24973d196ae57f9a260",
"index": 1723,
"step-1": "<mask token>\n\n\ndef reorder_sentences(output_sentences, input):\n\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef clean(string):\n string = re.sub(\"\\\\'s\", \" 's\", string)\n string = re.sub(\"\\\\'ve\", \" 've\", string)\n string = re.sub(\"n\\\\'t\", \" n't\", string)\n string = re.sub(\"\\\\'re\", \" 're\", string)\n string = re.sub(\"\\\\'d\", \" 'd\", string)\n string = re.sub(\"\\\\'ll\", \" 'll\", string)\n string = re.sub('!', ' ! ', string)\n string = re.sub('\\\\\\\\\\\\(', ' \\\\( ', string)\n string = re.sub('\\\\\\\\\\\\)', ' \\\\) ', string)\n string = re.sub('\\\\?', ' \\\\? ', string)\n string = re.sub('\\\\]\\\\]', '', string)\n string = re.sub('\\\\n', '', string)\n string = string.rstrip()\n string = remove_text_inside_brackets(string, '(){}[]')\n return string.strip()\n\n\ndef remove_text_inside_brackets(text, brackets):\n count = [0] * (len(brackets) // 2)\n saved_chars = []\n for character in text:\n for i, b in enumerate(brackets):\n if character == b:\n kind, is_close = divmod(i, 2)\n count[kind] += (-1) ** is_close\n if count[kind] < 0:\n count[kind] = 0\n else:\n break\n else:\n if not any(count):\n saved_chars.append(character)\n return ''.join(saved_chars)\n\n\ndef reorder_sentences(output_sentences, input):\n\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\n<mask token>\n\n\ndef summarize(input, num_sentences):\n return ' '.join(get_summarized(input, num_sentences))\n",
"step-3": "<mask token>\n\n\ndef clean(string):\n string = re.sub(\"\\\\'s\", \" 's\", string)\n string = re.sub(\"\\\\'ve\", \" 've\", string)\n string = re.sub(\"n\\\\'t\", \" n't\", string)\n string = re.sub(\"\\\\'re\", \" 're\", string)\n string = re.sub(\"\\\\'d\", \" 'd\", string)\n string = re.sub(\"\\\\'ll\", \" 'll\", string)\n string = re.sub('!', ' ! ', string)\n string = re.sub('\\\\\\\\\\\\(', ' \\\\( ', string)\n string = re.sub('\\\\\\\\\\\\)', ' \\\\) ', string)\n string = re.sub('\\\\?', ' \\\\? ', string)\n string = re.sub('\\\\]\\\\]', '', string)\n string = re.sub('\\\\n', '', string)\n string = string.rstrip()\n string = remove_text_inside_brackets(string, '(){}[]')\n return string.strip()\n\n\ndef remove_text_inside_brackets(text, brackets):\n count = [0] * (len(brackets) // 2)\n saved_chars = []\n for character in text:\n for i, b in enumerate(brackets):\n if character == b:\n kind, is_close = divmod(i, 2)\n count[kind] += (-1) ** is_close\n if count[kind] < 0:\n count[kind] = 0\n else:\n break\n else:\n if not any(count):\n saved_chars.append(character)\n return ''.join(saved_chars)\n\n\ndef reorder_sentences(output_sentences, input):\n\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\ndef get_summarized(input, num_sentences):\n input = clean(input)\n tokenizer = RegexpTokenizer('\\\\w+')\n base_words = [word.lower() for word in tokenizer.tokenize(input)]\n words = [word for word in base_words if word not in stopwords.words()]\n word_frequencies = FreqDist(words)\n most_frequent_words = [pair[0] for pair in word_frequencies.most_common\n (100)]\n input = remove_text_inside_brackets(input, '====')\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n actual_sentences_pre = sent_detector.tokenize(input)\n actual_sentences = []\n for sentence in actual_sentences_pre:\n if len(sentence.split()) <= 6:\n continue\n 
else:\n actual_sentences.append(sentence)\n working_sentences = [sentence.lower() for sentence in actual_sentences]\n output_sentences = []\n for word in most_frequent_words:\n for i in range(0, len(working_sentences)):\n if word in working_sentences[i] and actual_sentences[i\n ] not in output_sentences:\n output_sentences.append(actual_sentences[i])\n break\n if len(output_sentences) >= num_sentences:\n break\n if len(output_sentences) >= num_sentences:\n break\n for sentence in output_sentences:\n sentence.capitalize()\n return reorder_sentences(output_sentences, input)\n\n\ndef summarize(input, num_sentences):\n return ' '.join(get_summarized(input, num_sentences))\n",
"step-4": "import functools\nimport re\nimport nltk.data\nfrom nltk.corpus import stopwords\nfrom nltk.probability import FreqDist\nfrom nltk.tokenize import RegexpTokenizer\n\n\ndef clean(string):\n string = re.sub(\"\\\\'s\", \" 's\", string)\n string = re.sub(\"\\\\'ve\", \" 've\", string)\n string = re.sub(\"n\\\\'t\", \" n't\", string)\n string = re.sub(\"\\\\'re\", \" 're\", string)\n string = re.sub(\"\\\\'d\", \" 'd\", string)\n string = re.sub(\"\\\\'ll\", \" 'll\", string)\n string = re.sub('!', ' ! ', string)\n string = re.sub('\\\\\\\\\\\\(', ' \\\\( ', string)\n string = re.sub('\\\\\\\\\\\\)', ' \\\\) ', string)\n string = re.sub('\\\\?', ' \\\\? ', string)\n string = re.sub('\\\\]\\\\]', '', string)\n string = re.sub('\\\\n', '', string)\n string = string.rstrip()\n string = remove_text_inside_brackets(string, '(){}[]')\n return string.strip()\n\n\ndef remove_text_inside_brackets(text, brackets):\n count = [0] * (len(brackets) // 2)\n saved_chars = []\n for character in text:\n for i, b in enumerate(brackets):\n if character == b:\n kind, is_close = divmod(i, 2)\n count[kind] += (-1) ** is_close\n if count[kind] < 0:\n count[kind] = 0\n else:\n break\n else:\n if not any(count):\n saved_chars.append(character)\n return ''.join(saved_chars)\n\n\ndef reorder_sentences(output_sentences, input):\n\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\ndef get_summarized(input, num_sentences):\n input = clean(input)\n tokenizer = RegexpTokenizer('\\\\w+')\n base_words = [word.lower() for word in tokenizer.tokenize(input)]\n words = [word for word in base_words if word not in stopwords.words()]\n word_frequencies = FreqDist(words)\n most_frequent_words = [pair[0] for pair in word_frequencies.most_common\n (100)]\n input = remove_text_inside_brackets(input, '====')\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n 
actual_sentences_pre = sent_detector.tokenize(input)\n actual_sentences = []\n for sentence in actual_sentences_pre:\n if len(sentence.split()) <= 6:\n continue\n else:\n actual_sentences.append(sentence)\n working_sentences = [sentence.lower() for sentence in actual_sentences]\n output_sentences = []\n for word in most_frequent_words:\n for i in range(0, len(working_sentences)):\n if word in working_sentences[i] and actual_sentences[i\n ] not in output_sentences:\n output_sentences.append(actual_sentences[i])\n break\n if len(output_sentences) >= num_sentences:\n break\n if len(output_sentences) >= num_sentences:\n break\n for sentence in output_sentences:\n sentence.capitalize()\n return reorder_sentences(output_sentences, input)\n\n\ndef summarize(input, num_sentences):\n return ' '.join(get_summarized(input, num_sentences))\n",
"step-5": "import functools\nimport re\nimport nltk.data\nfrom nltk.corpus import stopwords\nfrom nltk.probability import FreqDist\nfrom nltk.tokenize import RegexpTokenizer\n\n\ndef clean(string):\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\\\\\(\", \" \\( \", string)\n string = re.sub(r\"\\\\\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\]\\]\", \"\", string)\n string = re.sub(r\"\\n\", \"\", string)\n string = string.rstrip()\n string = remove_text_inside_brackets(string, \"(){}[]\")\n return string.strip()\n\n\ndef remove_text_inside_brackets(text, brackets):\n count = [0] * (len(brackets) // 2) # count open/close brackets\n saved_chars = []\n for character in text:\n for i, b in enumerate(brackets):\n if character == b: # found bracket\n kind, is_close = divmod(i, 2)\n count[kind] += (-1) ** is_close # `+1`: open, `-1`: close\n if count[kind] < 0: # unbalanced bracket\n count[kind] = 0 # keep it\n else: # found bracket to remove\n break\n else: # character is not a [balanced] bracket\n if not any(count): # outside brackets\n saved_chars.append(character)\n return ''.join(saved_chars)\n\n\ndef reorder_sentences(output_sentences, input):\n def custom_sort(s1, s2):\n return input.find(s1) - input.find(s2)\n\n output_sentences.sort(key=functools.cmp_to_key(custom_sort))\n return output_sentences\n\n\ndef get_summarized(input, num_sentences):\n input = clean(input)\n tokenizer = RegexpTokenizer('\\w+')\n base_words = [word.lower() for word in tokenizer.tokenize(input)]\n words = [word for word in base_words if word not in stopwords.words()]\n word_frequencies = FreqDist(words)\n most_frequent_words = 
[pair[0] for pair in word_frequencies.most_common(100)]\n\n input = remove_text_inside_brackets(input, \"====\")\n\n sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n actual_sentences_pre = sent_detector.tokenize(input)\n actual_sentences = []\n for sentence in actual_sentences_pre:\n if len(sentence.split()) <= 6:\n continue\n else:\n actual_sentences.append(sentence)\n working_sentences = [sentence.lower() for sentence in actual_sentences]\n output_sentences = []\n\n for word in most_frequent_words:\n for i in range(0, len(working_sentences)):\n if word in working_sentences[i] and actual_sentences[i] not in output_sentences:\n output_sentences.append(actual_sentences[i])\n break\n if len(output_sentences) >= num_sentences:\n break\n\n if len(output_sentences) >= num_sentences:\n break\n for sentence in output_sentences:\n sentence.capitalize()\n return reorder_sentences(output_sentences, input)\n\n\ndef summarize(input, num_sentences):\n return \" \".join(get_summarized(input, num_sentences))\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Documents(Base):
__tablename__ = 'documents'
id = Column(Integer, primary_key=True, index=True)
name_doc = Column(String, index=True)
exp = Column(DATE, index=True)
notif = Column(Integer)
descrip = Column(String, index=True)
owner_username = Column(String, ForeignKey('users.username'))
owner = relationship('User', back_populates='documents')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Documents(Base):
__tablename__ = 'documents'
id = Column(Integer, primary_key=True, index=True)
name_doc = Column(String, index=True)
exp = Column(DATE, index=True)
notif = Column(Integer)
descrip = Column(String, index=True)
owner_username = Column(String, ForeignKey('users.username'))
owner = relationship('User', back_populates='documents')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(Base):
__tablename__ = 'users'
username = Column(String, primary_key=True, index=True)
email = Column(String, unique=True, index=True)
name = Column(String, index=True)
last_name = Column(String, index=True)
celular = Column(String, index=True)
user_password = Column(String)
documents = relationship('Documents', back_populates='owner')
class Documents(Base):
__tablename__ = 'documents'
id = Column(Integer, primary_key=True, index=True)
name_doc = Column(String, index=True)
exp = Column(DATE, index=True)
notif = Column(Integer)
descrip = Column(String, index=True)
owner_username = Column(String, ForeignKey('users.username'))
owner = relationship('User', back_populates='documents')
<|reserved_special_token_1|>
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DATE
from sqlalchemy.orm import relationship
from database import Base
class User(Base):
__tablename__ = 'users'
username = Column(String, primary_key=True, index=True)
email = Column(String, unique=True, index=True)
name = Column(String, index=True)
last_name = Column(String, index=True)
celular = Column(String, index=True)
user_password = Column(String)
documents = relationship('Documents', back_populates='owner')
class Documents(Base):
__tablename__ = 'documents'
id = Column(Integer, primary_key=True, index=True)
name_doc = Column(String, index=True)
exp = Column(DATE, index=True)
notif = Column(Integer)
descrip = Column(String, index=True)
owner_username = Column(String, ForeignKey('users.username'))
owner = relationship('User', back_populates='documents')
<|reserved_special_token_1|>
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DATE
from sqlalchemy.orm import relationship
from database import Base
class User(Base):
    """ORM model for an application user (table ``users``)."""

    __tablename__ = "users"
    # The login name doubles as the natural primary key.
    username = Column(String, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    name = Column(String, index=True)
    last_name = Column(String, index=True)
    celular = Column(String, index=True)  # phone number ("celular" = cell phone)
    # NOTE(review): stored as a plain String -- confirm the value is hashed
    # before it reaches this column.
    user_password = Column(String)
    # One-to-many: this user's documents (reverse side is Documents.owner).
    documents = relationship("Documents", back_populates="owner")
class Documents(Base):
    """ORM model for a document owned by a user (table ``documents``)."""

    __tablename__ = "documents"
    id = Column(Integer, primary_key=True, index=True)
    name_doc = Column(String, index=True)  # document name
    exp = Column(DATE, index=True)  # expiry date
    # NOTE(review): presumably a notification lead time -- confirm the unit
    # (days?) against the code that consumes it.
    notif = Column(Integer)
    descrip = Column(String, index=True)  # free-text description
    # Foreign key to users.username; `owner` is the ORM-level back-reference.
    owner_username = Column(String, ForeignKey("users.username"))
    owner = relationship("User", back_populates="documents")
|
flexible
|
{
"blob_id": "acf69cd714f04aeceb4be39b8a7b2bc5d77cd69f",
"index": 3307,
"step-1": "<mask token>\n\n\nclass Documents(Base):\n __tablename__ = 'documents'\n id = Column(Integer, primary_key=True, index=True)\n name_doc = Column(String, index=True)\n exp = Column(DATE, index=True)\n notif = Column(Integer)\n descrip = Column(String, index=True)\n owner_username = Column(String, ForeignKey('users.username'))\n owner = relationship('User', back_populates='documents')\n",
"step-2": "<mask token>\n\n\nclass User(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Documents(Base):\n __tablename__ = 'documents'\n id = Column(Integer, primary_key=True, index=True)\n name_doc = Column(String, index=True)\n exp = Column(DATE, index=True)\n notif = Column(Integer)\n descrip = Column(String, index=True)\n owner_username = Column(String, ForeignKey('users.username'))\n owner = relationship('User', back_populates='documents')\n",
"step-3": "<mask token>\n\n\nclass User(Base):\n __tablename__ = 'users'\n username = Column(String, primary_key=True, index=True)\n email = Column(String, unique=True, index=True)\n name = Column(String, index=True)\n last_name = Column(String, index=True)\n celular = Column(String, index=True)\n user_password = Column(String)\n documents = relationship('Documents', back_populates='owner')\n\n\nclass Documents(Base):\n __tablename__ = 'documents'\n id = Column(Integer, primary_key=True, index=True)\n name_doc = Column(String, index=True)\n exp = Column(DATE, index=True)\n notif = Column(Integer)\n descrip = Column(String, index=True)\n owner_username = Column(String, ForeignKey('users.username'))\n owner = relationship('User', back_populates='documents')\n",
"step-4": "from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DATE\nfrom sqlalchemy.orm import relationship\nfrom database import Base\n\n\nclass User(Base):\n __tablename__ = 'users'\n username = Column(String, primary_key=True, index=True)\n email = Column(String, unique=True, index=True)\n name = Column(String, index=True)\n last_name = Column(String, index=True)\n celular = Column(String, index=True)\n user_password = Column(String)\n documents = relationship('Documents', back_populates='owner')\n\n\nclass Documents(Base):\n __tablename__ = 'documents'\n id = Column(Integer, primary_key=True, index=True)\n name_doc = Column(String, index=True)\n exp = Column(DATE, index=True)\n notif = Column(Integer)\n descrip = Column(String, index=True)\n owner_username = Column(String, ForeignKey('users.username'))\n owner = relationship('User', back_populates='documents')\n",
"step-5": "from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DATE\nfrom sqlalchemy.orm import relationship\n\nfrom database import Base\n\n\nclass User(Base):\n __tablename__ = \"users\"\n\n username = Column(String, primary_key=True, index=True)\n email = Column(String, unique=True, index=True)\n name = Column(String, index=True)\n last_name = Column(String, index=True)\n celular = Column(String, index=True)\n user_password = Column(String)\n\n documents = relationship(\"Documents\", back_populates=\"owner\")\n\n\nclass Documents(Base):\n __tablename__ = \"documents\"\n\n id = Column(Integer, primary_key=True, index=True)\n name_doc = Column(String, index=True)\n exp = Column(DATE, index=True)\n notif = Column(Integer)\n descrip = Column(String, index=True)\n owner_username = Column(String, ForeignKey(\"users.username\"))\n\n owner = relationship(\"User\", back_populates=\"documents\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
api_key = '078c8443640961d5ce547c8269db5fd7'
<|reserved_special_token_1|>
# OpenWeatherMap API Key
# NOTE(review): secret is hard-coded in source; prefer loading it from an
# environment variable or a config file kept out of version control.
api_key = "078c8443640961d5ce547c8269db5fd7"
|
flexible
|
{
"blob_id": "4eb3d94a5fd22fc29000ec32475de9cbae1c183a",
"index": 5255,
"step-1": "<mask token>\n",
"step-2": "api_key = '078c8443640961d5ce547c8269db5fd7'\n",
"step-3": "# OpenWeatherMap API Key\napi_key = \"078c8443640961d5ce547c8269db5fd7\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators import BashOperator, DummyOperator
from datetime import datetime, timedelta
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------

default_args = {
    'owner': 'Jaimin',
    'depends_on_past': False,
    # NOTE(review): a dynamic start_date (datetime.now()) is an Airflow
    # anti-pattern -- the scheduler may never consider an interval complete;
    # confirm whether a fixed static date is intended here.
    'start_date': datetime.now(),
    'email': ['airflow@airflow.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,  # one retry, 5 minutes apart
    'retry_delay': timedelta(minutes=5),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}

# Runs daily at 01:00; concurrency=1 prevents overlapping DAG runs.
dag = DAG(
    'hive_create_part_v1',
    default_args=default_args,
    schedule_interval="0 1 * * *",
    concurrency=1)

# --------------------------------------------------------------------------------
# set tasks
# --------------------------------------------------------------------------------

# Single task: run the Hive partition-creation shell script against host mnode2.
task = BashOperator(
    task_id='hive_create_parition',
    bash_command='bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ',
    dag=dag)
|
normal
|
{
"blob_id": "49492ad1a1734be02ebefb77095fd560a7a7efd8",
"index": 7155,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndefault_args = {'owner': 'Jaimin', 'depends_on_past': False, 'start_date':\n datetime.now(), 'email': ['airflow@airflow.com'], 'email_on_failure': \n False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(\n minutes=5)}\ndag = DAG('hive_create_part_v1', default_args=default_args,\n schedule_interval='0 1 * * *', concurrency=1)\ntask = BashOperator(task_id='hive_create_parition', bash_command=\n 'bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ', dag=dag)\n",
"step-3": "import logging\nimport airflow\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators import BashOperator, DummyOperator\nfrom datetime import datetime, timedelta\ndefault_args = {'owner': 'Jaimin', 'depends_on_past': False, 'start_date':\n datetime.now(), 'email': ['airflow@airflow.com'], 'email_on_failure': \n False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(\n minutes=5)}\ndag = DAG('hive_create_part_v1', default_args=default_args,\n schedule_interval='0 1 * * *', concurrency=1)\ntask = BashOperator(task_id='hive_create_parition', bash_command=\n 'bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ', dag=dag)\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport airflow\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators import BashOperator, DummyOperator\n\nfrom datetime import datetime, timedelta\n\n\n# --------------------------------------------------------------------------------\n# set default arguments\n# --------------------------------------------------------------------------------\n\ndefault_args = {\n 'owner': 'Jaimin',\n 'depends_on_past': False,\n 'start_date': datetime.now(),\n 'email': ['airflow@airflow.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n}\n\ndag = DAG(\n 'hive_create_part_v1',\n default_args=default_args,\n schedule_interval=\"0 1 * * *\",\n concurrency=1)\n\n# --------------------------------------------------------------------------------\n# set tasks \n# --------------------------------------------------------------------------------\n\ntask = BashOperator(\n task_id='hive_create_parition',\n bash_command='bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ',\n dag=dag)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding=utf8
from __future__ import unicode_literals, absolute_import, division, print_function
"""This is a method to read files, online and local, and cache them"""
import os
from .Read import read as botread
from .Database import db as botdb
class BotNotes():
    """Collects release-note lines from .txt files in configured directories.

    Lines already persisted via botdb are tracked under "old"; lines seen
    for the first time this run are additionally recorded under "new".
    Only "old" is written back, so "new" is per-run state.
    """

    def __init__(self):
        self.notes = botdb.get_plugin_value('SpiceBot_Release_Notes', 'notes') or dict()
        self.dir_to_scan = botread.get_config_dirs("SpiceBot_Release_Notes")
        self.load_txt_files(self.dir_to_scan)
        self.save_notes()

    def save_notes(self):
        # Persist only the "old" (already-seen) lines for each note file.
        savenotes = {}
        for notefile in list(self.notes.keys()):
            savenotes[notefile] = {"old": self.notes[notefile]["old"]}
        botdb.set_plugin_value('SpiceBot_Release_Notes', 'notes', savenotes)

    def load_txt_files(self, dir_to_scan):
        # Scan every directory for .txt files; record unseen lines as "new"
        # and append them to "old" so they are not reported again.
        for directory in dir_to_scan:
            for file in os.listdir(directory):
                filepath = os.path.join(directory, file)  # was duplicated in the original
                if not (os.path.isfile(filepath) and filepath.endswith('.txt')):
                    continue
                # Key notes by the lowercased file name without its extension.
                filename_base = os.path.basename(filepath).rsplit('.', 1)[0].lower()
                entry = self.notes.setdefault(filename_base, {})
                entry.setdefault("old", [])
                entry.setdefault("new", [])
                # Context manager guarantees the handle is closed even if
                # reading raises (the original leaked it on error).
                with open(filepath, 'r') as text_file:
                    for line in text_file.readlines():
                        if line not in entry["old"]:
                            entry["new"].append(line)
                            entry["old"].append(line)
# Module-level singleton: directory scanning and persistence run at import time.
releasenotes = BotNotes()
|
normal
|
{
"blob_id": "2a062f0c2836850320cdd39eee6a354032ba5c33",
"index": 4565,
"step-1": "<mask token>\n\n\nclass BotNotes:\n\n def __init__(self):\n self.notes = botdb.get_plugin_value('SpiceBot_Release_Notes', 'notes'\n ) or dict()\n self.dir_to_scan = botread.get_config_dirs('SpiceBot_Release_Notes')\n self.load_txt_files(self.dir_to_scan)\n self.save_notes()\n <mask token>\n\n def load_txt_files(self, dir_to_scan):\n for directory in dir_to_scan:\n for file in os.listdir(directory):\n filepath = os.path.join(directory, file)\n filepath = os.path.join(directory, file)\n if os.path.isfile(filepath) and filepath.endswith('.txt'):\n slashsplit = str(filepath).split('/')\n filename = slashsplit[-1]\n filename_base = str(os.path.basename(filename).rsplit(\n '.', 1)[0]).lower()\n if filename_base not in list(self.notes.keys()):\n self.notes[filename_base] = {}\n if 'old' not in list(self.notes[filename_base].keys()):\n self.notes[filename_base]['old'] = []\n if 'new' not in list(self.notes[filename_base].keys()):\n self.notes[filename_base]['new'] = []\n text_file = open(filepath, 'r')\n lines = text_file.readlines()\n for line in lines:\n if str(line) not in self.notes[filename_base]['old']:\n self.notes[filename_base]['new'].append(str(line))\n self.notes[filename_base]['old'].append(str(line))\n text_file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BotNotes:\n\n def __init__(self):\n self.notes = botdb.get_plugin_value('SpiceBot_Release_Notes', 'notes'\n ) or dict()\n self.dir_to_scan = botread.get_config_dirs('SpiceBot_Release_Notes')\n self.load_txt_files(self.dir_to_scan)\n self.save_notes()\n\n def save_notes(self):\n savenotes = {}\n for notefile in list(self.notes.keys()):\n savenotes[notefile] = {'old': self.notes[notefile]['old']}\n botdb.set_plugin_value('SpiceBot_Release_Notes', 'notes', savenotes)\n\n def load_txt_files(self, dir_to_scan):\n for directory in dir_to_scan:\n for file in os.listdir(directory):\n filepath = os.path.join(directory, file)\n filepath = os.path.join(directory, file)\n if os.path.isfile(filepath) and filepath.endswith('.txt'):\n slashsplit = str(filepath).split('/')\n filename = slashsplit[-1]\n filename_base = str(os.path.basename(filename).rsplit(\n '.', 1)[0]).lower()\n if filename_base not in list(self.notes.keys()):\n self.notes[filename_base] = {}\n if 'old' not in list(self.notes[filename_base].keys()):\n self.notes[filename_base]['old'] = []\n if 'new' not in list(self.notes[filename_base].keys()):\n self.notes[filename_base]['new'] = []\n text_file = open(filepath, 'r')\n lines = text_file.readlines()\n for line in lines:\n if str(line) not in self.notes[filename_base]['old']:\n self.notes[filename_base]['new'].append(str(line))\n self.notes[filename_base]['old'].append(str(line))\n text_file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BotNotes:\n\n def __init__(self):\n self.notes = botdb.get_plugin_value('SpiceBot_Release_Notes', 'notes'\n ) or dict()\n self.dir_to_scan = botread.get_config_dirs('SpiceBot_Release_Notes')\n self.load_txt_files(self.dir_to_scan)\n self.save_notes()\n\n def save_notes(self):\n savenotes = {}\n for notefile in list(self.notes.keys()):\n savenotes[notefile] = {'old': self.notes[notefile]['old']}\n botdb.set_plugin_value('SpiceBot_Release_Notes', 'notes', savenotes)\n\n def load_txt_files(self, dir_to_scan):\n for directory in dir_to_scan:\n for file in os.listdir(directory):\n filepath = os.path.join(directory, file)\n filepath = os.path.join(directory, file)\n if os.path.isfile(filepath) and filepath.endswith('.txt'):\n slashsplit = str(filepath).split('/')\n filename = slashsplit[-1]\n filename_base = str(os.path.basename(filename).rsplit(\n '.', 1)[0]).lower()\n if filename_base not in list(self.notes.keys()):\n self.notes[filename_base] = {}\n if 'old' not in list(self.notes[filename_base].keys()):\n self.notes[filename_base]['old'] = []\n if 'new' not in list(self.notes[filename_base].keys()):\n self.notes[filename_base]['new'] = []\n text_file = open(filepath, 'r')\n lines = text_file.readlines()\n for line in lines:\n if str(line) not in self.notes[filename_base]['old']:\n self.notes[filename_base]['new'].append(str(line))\n self.notes[filename_base]['old'].append(str(line))\n text_file.close()\n\n\nreleasenotes = BotNotes()\n",
"step-4": "from __future__ import unicode_literals, absolute_import, division, print_function\n<mask token>\nimport os\nfrom .Read import read as botread\nfrom .Database import db as botdb\n\n\nclass BotNotes:\n\n def __init__(self):\n self.notes = botdb.get_plugin_value('SpiceBot_Release_Notes', 'notes'\n ) or dict()\n self.dir_to_scan = botread.get_config_dirs('SpiceBot_Release_Notes')\n self.load_txt_files(self.dir_to_scan)\n self.save_notes()\n\n def save_notes(self):\n savenotes = {}\n for notefile in list(self.notes.keys()):\n savenotes[notefile] = {'old': self.notes[notefile]['old']}\n botdb.set_plugin_value('SpiceBot_Release_Notes', 'notes', savenotes)\n\n def load_txt_files(self, dir_to_scan):\n for directory in dir_to_scan:\n for file in os.listdir(directory):\n filepath = os.path.join(directory, file)\n filepath = os.path.join(directory, file)\n if os.path.isfile(filepath) and filepath.endswith('.txt'):\n slashsplit = str(filepath).split('/')\n filename = slashsplit[-1]\n filename_base = str(os.path.basename(filename).rsplit(\n '.', 1)[0]).lower()\n if filename_base not in list(self.notes.keys()):\n self.notes[filename_base] = {}\n if 'old' not in list(self.notes[filename_base].keys()):\n self.notes[filename_base]['old'] = []\n if 'new' not in list(self.notes[filename_base].keys()):\n self.notes[filename_base]['new'] = []\n text_file = open(filepath, 'r')\n lines = text_file.readlines()\n for line in lines:\n if str(line) not in self.notes[filename_base]['old']:\n self.notes[filename_base]['new'].append(str(line))\n self.notes[filename_base]['old'].append(str(line))\n text_file.close()\n\n\nreleasenotes = BotNotes()\n",
"step-5": "# coding=utf8\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\"\"\"This is a method to read files, online and local, and cache them\"\"\"\n\nimport os\n\nfrom .Read import read as botread\nfrom .Database import db as botdb\n\n\nclass BotNotes():\n\n def __init__(self):\n self.notes = botdb.get_plugin_value('SpiceBot_Release_Notes', 'notes') or dict()\n self.dir_to_scan = botread.get_config_dirs(\"SpiceBot_Release_Notes\")\n self.load_txt_files(self.dir_to_scan)\n self.save_notes()\n\n def save_notes(self):\n savenotes = {}\n for notefile in list(self.notes.keys()):\n savenotes[notefile] = {\"old\": self.notes[notefile][\"old\"]}\n botdb.set_plugin_value('SpiceBot_Release_Notes', 'notes', savenotes)\n\n def load_txt_files(self, dir_to_scan):\n # iterate over files within\n for directory in dir_to_scan:\n for file in os.listdir(directory):\n filepath = os.path.join(directory, file)\n filepath = os.path.join(directory, file)\n if os.path.isfile(filepath) and filepath.endswith('.txt'):\n # gather file stats\n slashsplit = str(filepath).split(\"/\")\n filename = slashsplit[-1]\n filename_base = str(os.path.basename(filename).rsplit('.', 1)[0]).lower()\n\n if filename_base not in list(self.notes.keys()):\n self.notes[filename_base] = {}\n if \"old\" not in list(self.notes[filename_base].keys()):\n self.notes[filename_base][\"old\"] = []\n if \"new\" not in list(self.notes[filename_base].keys()):\n self.notes[filename_base][\"new\"] = []\n\n text_file = open(filepath, 'r')\n lines = text_file.readlines()\n for line in lines:\n if str(line) not in self.notes[filename_base][\"old\"]:\n self.notes[filename_base][\"new\"].append(str(line))\n self.notes[filename_base][\"old\"].append(str(line))\n text_file.close()\n\n\nreleasenotes = BotNotes()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import sys
import re
import math

# Grumpy Bookstore Owner (LeetCode 1052).
# stdin carries integers: the first half is customers[i] (arrivals in minute
# i), the second half is grumpy[i] (1 = owner grumpy, those customers are
# lost), and the final integer is x, the length of the one window in which
# the owner can force himself to stay calm.  Print the maximum number of
# satisfied customers.
s = sys.stdin.read()
digits = re.findall(r"-?\d+", s)
listline = [int(e) for e in digits]
x = listline[-1]
del (listline[-1])
n = len(listline) // 2
customers = listline[:n]
grumpy = listline[n:]
maxcus = 0
if x >= n:
    # Window covers every minute: nobody is lost.  (Original tested x == n
    # only, printing 0 for x > n.)
    print(sum(customers))
else:
    # FIX: range(n - x + 1) -- the original range(n - x) skipped the last
    # possible window position.
    for i in range(n - x + 1):
        total = 0
        for j in range(i, i + x):
            # FIX: the original summed customers[i] x times instead of the
            # actual window contents customers[j].
            total += customers[j]
        # Outside the window, customers count only in non-grumpy minutes.
        for j in range(i):
            if grumpy[j] != 1:
                total += customers[j]
        for j in range(i + x, n):
            if grumpy[j] != 1:
                total += customers[j]
        maxcus = max(total, maxcus)
    print(maxcus)
|
normal
|
{
"blob_id": "24bc43c1fe035430afde05fec1330e27fb5f1d86",
"index": 8809,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndel listline[-1]\n<mask token>\nif x == n:\n print(sum(customers))\nelse:\n for i in range(n - x):\n total = 0\n for j in range(i, i + x):\n total += customers[i]\n for j in range(i):\n if grumpy[j] != 1:\n total += customers[j]\n for j in range(i + x, n):\n if grumpy[j] != 1:\n total += customers[j]\n maxcus = max(total, maxcus)\n print(maxcus)\n",
"step-3": "<mask token>\ns = sys.stdin.read()\ndigits = re.findall('-?\\\\d+', s)\nlistline = [int(e) for e in digits]\nx = listline[-1]\ndel listline[-1]\nn = len(listline) // 2\ncustomers = listline[:n]\ngrumpy = listline[n:]\nmaxcus = 0\nif x == n:\n print(sum(customers))\nelse:\n for i in range(n - x):\n total = 0\n for j in range(i, i + x):\n total += customers[i]\n for j in range(i):\n if grumpy[j] != 1:\n total += customers[j]\n for j in range(i + x, n):\n if grumpy[j] != 1:\n total += customers[j]\n maxcus = max(total, maxcus)\n print(maxcus)\n",
"step-4": "import sys\nimport re\nimport math\ns = sys.stdin.read()\ndigits = re.findall('-?\\\\d+', s)\nlistline = [int(e) for e in digits]\nx = listline[-1]\ndel listline[-1]\nn = len(listline) // 2\ncustomers = listline[:n]\ngrumpy = listline[n:]\nmaxcus = 0\nif x == n:\n print(sum(customers))\nelse:\n for i in range(n - x):\n total = 0\n for j in range(i, i + x):\n total += customers[i]\n for j in range(i):\n if grumpy[j] != 1:\n total += customers[j]\n for j in range(i + x, n):\n if grumpy[j] != 1:\n total += customers[j]\n maxcus = max(total, maxcus)\n print(maxcus)\n",
"step-5": "import sys\nimport re\nimport math\ns=sys.stdin.read()\ndigits=re.findall(r\"-?\\d+\",s)\nlistline= [int(e) for e in digits ]\nx=listline[-1]\ndel(listline[-1])\nn=len(listline)//2\ncustomers=listline[:n]\ngrumpy=listline[n:]\nmaxcus=0\nif x==n:\n print(sum(customers))\nelse:\n for i in range(n-x):\n total=0\n for j in range(i,i+x):\n total+=customers[i]\n for j in range(i):\n if grumpy[j]!=1:\n total+=customers[j]\n for j in range(i+x,n):\n if grumpy[j]!=1:\n total+=customers[j]\n maxcus=max(total,maxcus)\n print(maxcus)\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['UserGpgKeyArgs', 'UserGpgKey']
# NOTE(review): this class is emitted by the Pulumi Terraform bridge (tfgen);
# per the file header, prefer regenerating over hand-editing.
@pulumi.input_type
class UserGpgKeyArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 user_id: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a UserGpgKey resource.
        :param pulumi.Input[str] key: The armored GPG public key.
        :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        # Values are stored through pulumi's input-property table (pulumi.set),
        # not as plain attributes; @pulumi.input_type relies on this.
        pulumi.set(__self__, "key", key)
        if user_id is not None:
            pulumi.set(__self__, "user_id", user_id)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The armored GPG public key.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> Optional[pulumi.Input[int]]:
        """
        The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        return pulumi.get(self, "user_id")

    @user_id.setter
    def user_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "user_id", value)
# NOTE(review): generated state bag (tfgen); unlike UserGpgKeyArgs every field
# is optional because state lookups may supply any subset of properties.
@pulumi.input_type
class _UserGpgKeyState:
    def __init__(__self__, *,
                 created_at: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 key_id: Optional[pulumi.Input[int]] = None,
                 user_id: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering UserGpgKey resources.
        :param pulumi.Input[str] created_at: The time when this key was created in GitLab.
        :param pulumi.Input[str] key: The armored GPG public key.
        :param pulumi.Input[int] key_id: The ID of the GPG key.
        :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        # Only set properties the caller actually supplied.
        if created_at is not None:
            pulumi.set(__self__, "created_at", created_at)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if key_id is not None:
            pulumi.set(__self__, "key_id", key_id)
        if user_id is not None:
            pulumi.set(__self__, "user_id", user_id)

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[pulumi.Input[str]]:
        """
        The time when this key was created in GitLab.
        """
        return pulumi.get(self, "created_at")

    @created_at.setter
    def created_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_at", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        The armored GPG public key.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> Optional[pulumi.Input[int]]:
        """
        The ID of the GPG key.
        """
        return pulumi.get(self, "key_id")

    @key_id.setter
    def key_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "key_id", value)

    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> Optional[pulumi.Input[int]]:
        """
        The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        return pulumi.get(self, "user_id")

    @user_id.setter
    def user_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "user_id", value)
# NOTE(review): generated resource class (tfgen). The two @overload stubs only
# document the alternative call shapes; the real __init__ below dispatches to
# _internal_init. Do not hand-edit; regenerate instead (see file header).
class UserGpgKey(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 user_id: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.
        > Managing GPG keys for arbitrary users requires admin privileges.
        **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)
        ## Example Usage
        ```python
        import pulumi
        import pulumi_gitlab as gitlab
        example_user = gitlab.get_user(username="example-user")
        # Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.
        example_user_gpg_key = gitlab.UserGpgKey("exampleUserGpgKey",
            user_id=example_user.id,
            key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
        ...
        -----END PGP PUBLIC KEY BLOCK-----\"\"\")
        # Manages a GPG key for the current user
        example_user_user_gpg_key = gitlab.UserGpgKey("exampleUserUserGpgKey", key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
        ...
        -----END PGP PUBLIC KEY BLOCK-----\"\"\")
        ```
        ## Import
        You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.
        ```sh
        $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1
        ```
        Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.
        ```sh
        $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key: The armored GPG public key.
        :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: UserGpgKeyArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.
        > Managing GPG keys for arbitrary users requires admin privileges.
        **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)
        ## Example Usage
        ```python
        import pulumi
        import pulumi_gitlab as gitlab
        example_user = gitlab.get_user(username="example-user")
        # Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.
        example_user_gpg_key = gitlab.UserGpgKey("exampleUserGpgKey",
            user_id=example_user.id,
            key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
        ...
        -----END PGP PUBLIC KEY BLOCK-----\"\"\")
        # Manages a GPG key for the current user
        example_user_user_gpg_key = gitlab.UserGpgKey("exampleUserUserGpgKey", key=\"\"\"-----BEGIN PGP PUBLIC KEY BLOCK-----
        ...
        -----END PGP PUBLIC KEY BLOCK-----\"\"\")
        ```
        ## Import
        You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.
        ```sh
        $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1
        ```
        Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.
        ```sh
        $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1
        ```
        :param str resource_name: The name of the resource.
        :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Normalize the two supported call shapes (args object vs. keywords)
        # and forward everything to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 user_id: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            # Creating a new resource (as opposed to looking one up by id).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)
            if key is None and not opts.urn:
                raise TypeError("Missing required property 'key'")
            __props__.__dict__["key"] = key
            __props__.__dict__["user_id"] = user_id
            # Output-only properties start as None; the engine populates them.
            __props__.__dict__["created_at"] = None
            __props__.__dict__["key_id"] = None
        super(UserGpgKey, __self__).__init__(
            'gitlab:index/userGpgKey:UserGpgKey',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            created_at: Optional[pulumi.Input[str]] = None,
            key: Optional[pulumi.Input[str]] = None,
            key_id: Optional[pulumi.Input[int]] = None,
            user_id: Optional[pulumi.Input[int]] = None) -> 'UserGpgKey':
        """
        Get an existing UserGpgKey resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] created_at: The time when this key was created in GitLab.
        :param pulumi.Input[str] key: The armored GPG public key.
        :param pulumi.Input[int] key_id: The ID of the GPG key.
        :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)
        __props__.__dict__["created_at"] = created_at
        __props__.__dict__["key"] = key
        __props__.__dict__["key_id"] = key_id
        __props__.__dict__["user_id"] = user_id
        return UserGpgKey(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> pulumi.Output[str]:
        """
        The time when this key was created in GitLab.
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter
    def key(self) -> pulumi.Output[str]:
        """
        The armored GPG public key.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> pulumi.Output[int]:
        """
        The ID of the GPG key.
        """
        return pulumi.get(self, "key_id")

    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> pulumi.Output[Optional[int]]:
        """
        The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.
        """
        return pulumi.get(self, "user_id")
|
normal
|
{
"blob_id": "aa79d5cbe656979bf9c228f6a576f2bbf7e405ca",
"index": 2950,
"step-1": "<mask token>\n\n\n@pulumi.input_type\nclass _UserGpgKeyState:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass UserGpgKey(pulumi.CustomResource):\n\n @overload\n def __init__(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: The armored GPG public 
key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n ...\n\n @overload\n def __init__(__self__, resource_name: str, args: UserGpgKeyArgs, opts:\n Optional[pulumi.ResourceOptions]=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n\n def 
__init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs,\n pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.\n __dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n opts = pulumi.ResourceOptions.merge(_utilities.\n get_resource_opts_defaults(), opts)\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError(\n 'Expected resource options to be a ResourceOptions instance')\n if opts.id is None:\n if __props__ is not None:\n raise TypeError(\n '__props__ is only valid when passed in combination with a valid opts.id to get an existing resource'\n )\n __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)\n if key is None and not opts.urn:\n raise TypeError(\"Missing required property 'key'\")\n __props__.__dict__['key'] = key\n __props__.__dict__['user_id'] = user_id\n __props__.__dict__['created_at'] = None\n __props__.__dict__['key_id'] = None\n super(UserGpgKey, __self__).__init__(\n 'gitlab:index/userGpgKey:UserGpgKey', resource_name, __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str, id: pulumi.Input[str], opts: Optional[\n pulumi.ResourceOptions]=None, created_at: Optional[pulumi.Input[str\n ]]=None, key: Optional[pulumi.Input[str]]=None, key_id: Optional[\n pulumi.Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None\n ) ->'UserGpgKey':\n \"\"\"\n Get an existing UserGpgKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param 
pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)\n )\n __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)\n __props__.__dict__['created_at'] = created_at\n __props__.__dict__['key'] = key\n __props__.__dict__['key_id'] = key_id\n __props__.__dict__['user_id'] = user_id\n return UserGpgKey(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->pulumi.Output[str]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Output[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) ->pulumi.Output[int]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->pulumi.Output[Optional[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n",
"step-2": "<mask token>\n\n\n@pulumi.input_type\nclass _UserGpgKeyState:\n\n def __init__(__self__, *, created_at: Optional[pulumi.Input[str]]=None,\n key: Optional[pulumi.Input[str]]=None, key_id: Optional[pulumi.\n Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None):\n \"\"\"\n Input properties used for looking up and filtering UserGpgKey resources.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n if created_at is not None:\n pulumi.set(__self__, 'created_at', created_at)\n if key is not None:\n pulumi.set(__self__, 'key', key)\n if key_id is not None:\n pulumi.set(__self__, 'key_id', key_id)\n if user_id is not None:\n pulumi.set(__self__, 'user_id', user_id)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->Optional[pulumi.Input[str]]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @created_at.setter\n def created_at(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, 'created_at', value)\n\n @property\n @pulumi.getter\n def key(self) ->Optional[pulumi.Input[str]]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @key.setter\n def key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, 'key', value)\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n <mask token>\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID 
of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'user_id', value)\n\n\nclass UserGpgKey(pulumi.CustomResource):\n\n @overload\n def __init__(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n ...\n\n @overload\n def __init__(__self__, resource_name: str, args: UserGpgKeyArgs, opts:\n Optional[pulumi.ResourceOptions]=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs,\n pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.\n __dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n opts = pulumi.ResourceOptions.merge(_utilities.\n get_resource_opts_defaults(), opts)\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError(\n 'Expected resource options to be a ResourceOptions instance')\n if opts.id is None:\n if __props__ is not None:\n raise TypeError(\n '__props__ is only valid when passed in combination with a valid 
opts.id to get an existing resource'\n )\n __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)\n if key is None and not opts.urn:\n raise TypeError(\"Missing required property 'key'\")\n __props__.__dict__['key'] = key\n __props__.__dict__['user_id'] = user_id\n __props__.__dict__['created_at'] = None\n __props__.__dict__['key_id'] = None\n super(UserGpgKey, __self__).__init__(\n 'gitlab:index/userGpgKey:UserGpgKey', resource_name, __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str, id: pulumi.Input[str], opts: Optional[\n pulumi.ResourceOptions]=None, created_at: Optional[pulumi.Input[str\n ]]=None, key: Optional[pulumi.Input[str]]=None, key_id: Optional[\n pulumi.Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None\n ) ->'UserGpgKey':\n \"\"\"\n Get an existing UserGpgKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)\n )\n __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)\n __props__.__dict__['created_at'] = created_at\n __props__.__dict__['key'] = key\n __props__.__dict__['key_id'] = key_id\n __props__.__dict__['user_id'] = user_id\n return UserGpgKey(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->pulumi.Output[str]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Output[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) ->pulumi.Output[int]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->pulumi.Output[Optional[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n",
"step-3": "<mask token>\n\n\n@pulumi.input_type\nclass UserGpgKeyArgs:\n <mask token>\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Input[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n <mask token>\n <mask token>\n <mask token>\n\n\n@pulumi.input_type\nclass _UserGpgKeyState:\n\n def __init__(__self__, *, created_at: Optional[pulumi.Input[str]]=None,\n key: Optional[pulumi.Input[str]]=None, key_id: Optional[pulumi.\n Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None):\n \"\"\"\n Input properties used for looking up and filtering UserGpgKey resources.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n if created_at is not None:\n pulumi.set(__self__, 'created_at', created_at)\n if key is not None:\n pulumi.set(__self__, 'key', key)\n if key_id is not None:\n pulumi.set(__self__, 'key_id', key_id)\n if user_id is not None:\n pulumi.set(__self__, 'user_id', user_id)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->Optional[pulumi.Input[str]]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @created_at.setter\n def created_at(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, 'created_at', value)\n\n @property\n @pulumi.getter\n def key(self) ->Optional[pulumi.Input[str]]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @key.setter\n def key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, 'key', value)\n\n @property\n @pulumi.getter(name='keyId')\n 
def key_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @key_id.setter\n def key_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'key_id', value)\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'user_id', value)\n\n\nclass UserGpgKey(pulumi.CustomResource):\n\n @overload\n def __init__(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n ...\n\n @overload\n def __init__(__self__, resource_name: str, args: UserGpgKeyArgs, opts:\n Optional[pulumi.ResourceOptions]=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs,\n pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.\n __dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n opts = pulumi.ResourceOptions.merge(_utilities.\n get_resource_opts_defaults(), opts)\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError(\n 'Expected resource options to be a ResourceOptions instance')\n if opts.id is None:\n if __props__ is not None:\n raise TypeError(\n '__props__ is only valid when passed in combination with a valid 
opts.id to get an existing resource'\n )\n __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)\n if key is None and not opts.urn:\n raise TypeError(\"Missing required property 'key'\")\n __props__.__dict__['key'] = key\n __props__.__dict__['user_id'] = user_id\n __props__.__dict__['created_at'] = None\n __props__.__dict__['key_id'] = None\n super(UserGpgKey, __self__).__init__(\n 'gitlab:index/userGpgKey:UserGpgKey', resource_name, __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str, id: pulumi.Input[str], opts: Optional[\n pulumi.ResourceOptions]=None, created_at: Optional[pulumi.Input[str\n ]]=None, key: Optional[pulumi.Input[str]]=None, key_id: Optional[\n pulumi.Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None\n ) ->'UserGpgKey':\n \"\"\"\n Get an existing UserGpgKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)\n )\n __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)\n __props__.__dict__['created_at'] = created_at\n __props__.__dict__['key'] = key\n __props__.__dict__['key_id'] = key_id\n __props__.__dict__['user_id'] = user_id\n return UserGpgKey(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->pulumi.Output[str]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Output[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) ->pulumi.Output[int]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->pulumi.Output[Optional[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n",
"step-4": "<mask token>\n\n\n@pulumi.input_type\nclass UserGpgKeyArgs:\n\n def __init__(__self__, *, key: pulumi.Input[str], user_id: Optional[\n pulumi.Input[int]]=None):\n \"\"\"\n The set of arguments for constructing a UserGpgKey resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n pulumi.set(__self__, 'key', key)\n if user_id is not None:\n pulumi.set(__self__, 'user_id', user_id)\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Input[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @key.setter\n def key(self, value: pulumi.Input[str]):\n pulumi.set(self, 'key', value)\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'user_id', value)\n\n\n@pulumi.input_type\nclass _UserGpgKeyState:\n\n def __init__(__self__, *, created_at: Optional[pulumi.Input[str]]=None,\n key: Optional[pulumi.Input[str]]=None, key_id: Optional[pulumi.\n Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None):\n \"\"\"\n Input properties used for looking up and filtering UserGpgKey resources.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n if created_at is not None:\n pulumi.set(__self__, 'created_at', created_at)\n if key is not None:\n pulumi.set(__self__, 'key', key)\n if key_id is not None:\n pulumi.set(__self__, 'key_id', key_id)\n if user_id is not None:\n pulumi.set(__self__, 'user_id', user_id)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->Optional[pulumi.Input[str]]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @created_at.setter\n def created_at(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, 'created_at', value)\n\n @property\n @pulumi.getter\n def key(self) ->Optional[pulumi.Input[str]]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @key.setter\n def key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, 'key', value)\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) 
->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @key_id.setter\n def key_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'key_id', value)\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, 'user_id', value)\n\n\nclass UserGpgKey(pulumi.CustomResource):\n\n @overload\n def __init__(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n ...\n\n @overload\n def __init__(__self__, resource_name: str, args: UserGpgKeyArgs, opts:\n Optional[pulumi.ResourceOptions]=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\"\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\"\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs,\n pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.\n __dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.\n ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None,\n user_id: Optional[pulumi.Input[int]]=None, __props__=None):\n opts = pulumi.ResourceOptions.merge(_utilities.\n get_resource_opts_defaults(), opts)\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError(\n 'Expected resource options to be a ResourceOptions instance')\n if opts.id is None:\n if __props__ is not None:\n raise TypeError(\n '__props__ is only valid when passed in combination with a valid 
opts.id to get an existing resource'\n )\n __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)\n if key is None and not opts.urn:\n raise TypeError(\"Missing required property 'key'\")\n __props__.__dict__['key'] = key\n __props__.__dict__['user_id'] = user_id\n __props__.__dict__['created_at'] = None\n __props__.__dict__['key_id'] = None\n super(UserGpgKey, __self__).__init__(\n 'gitlab:index/userGpgKey:UserGpgKey', resource_name, __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str, id: pulumi.Input[str], opts: Optional[\n pulumi.ResourceOptions]=None, created_at: Optional[pulumi.Input[str\n ]]=None, key: Optional[pulumi.Input[str]]=None, key_id: Optional[\n pulumi.Input[int]]=None, user_id: Optional[pulumi.Input[int]]=None\n ) ->'UserGpgKey':\n \"\"\"\n Get an existing UserGpgKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)\n )\n __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)\n __props__.__dict__['created_at'] = created_at\n __props__.__dict__['key'] = key\n __props__.__dict__['key_id'] = key_id\n __props__.__dict__['user_id'] = user_id\n return UserGpgKey(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name='createdAt')\n def created_at(self) ->pulumi.Output[str]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, 'created_at')\n\n @property\n @pulumi.getter\n def key(self) ->pulumi.Output[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter(name='keyId')\n def key_id(self) ->pulumi.Output[int]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, 'key_id')\n\n @property\n @pulumi.getter(name='userId')\n def user_id(self) ->pulumi.Output[Optional[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, 'user_id')\n",
"step-5": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport copy\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom . import _utilities\n\n__all__ = ['UserGpgKeyArgs', 'UserGpgKey']\n\n@pulumi.input_type\nclass UserGpgKeyArgs:\n def __init__(__self__, *,\n key: pulumi.Input[str],\n user_id: Optional[pulumi.Input[int]] = None):\n \"\"\"\n The set of arguments for constructing a UserGpgKey resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n pulumi.set(__self__, \"key\", key)\n if user_id is not None:\n pulumi.set(__self__, \"user_id\", user_id)\n\n @property\n @pulumi.getter\n def key(self) -> pulumi.Input[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @key.setter\n def key(self, value: pulumi.Input[str]):\n pulumi.set(self, \"key\", value)\n\n @property\n @pulumi.getter(name=\"userId\")\n def user_id(self) -> Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, \"user_id\")\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, \"user_id\", value)\n\n\n@pulumi.input_type\nclass _UserGpgKeyState:\n def __init__(__self__, *,\n created_at: Optional[pulumi.Input[str]] = None,\n key: Optional[pulumi.Input[str]] = None,\n key_id: Optional[pulumi.Input[int]] = None,\n user_id: Optional[pulumi.Input[int]] = None):\n \"\"\"\n Input properties used for looking up and filtering UserGpgKey resources.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n if created_at is not None:\n pulumi.set(__self__, \"created_at\", created_at)\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if key_id is not None:\n pulumi.set(__self__, \"key_id\", key_id)\n if user_id is not None:\n pulumi.set(__self__, \"user_id\", user_id)\n\n @property\n @pulumi.getter(name=\"createdAt\")\n def created_at(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, \"created_at\")\n\n @created_at.setter\n def created_at(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"created_at\", value)\n\n @property\n @pulumi.getter\n def key(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @key.setter\n def key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"key\", value)\n\n @property\n 
@pulumi.getter(name=\"keyId\")\n def key_id(self) -> Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, \"key_id\")\n\n @key_id.setter\n def key_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, \"key_id\", value)\n\n @property\n @pulumi.getter(name=\"userId\")\n def user_id(self) -> Optional[pulumi.Input[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, \"user_id\")\n\n @user_id.setter\n def user_id(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, \"user_id\", value)\n\n\nclass UserGpgKey(pulumi.CustomResource):\n @overload\n def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n key: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[int]] = None,\n __props__=None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\\\"\\\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\\\"\\\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\\\"\\\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\\\"\\\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n ...\n @overload\n def __init__(__self__,\n resource_name: str,\n args: UserGpgKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n \"\"\"\n The `UserGpgKey` resource allows to manage the lifecycle of a GPG key assigned to the current user or a specific user.\n\n > Managing GPG keys for arbitrary users requires admin privileges.\n\n **Upstream API**: [GitLab REST API docs](https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key)\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_gitlab as gitlab\n\n example_user = gitlab.get_user(username=\"example-user\")\n # Manages a GPG key for the specified user. 
An admin token is required if `user_id` is specified.\n example_user_gpg_key = gitlab.UserGpgKey(\"exampleUserGpgKey\",\n user_id=example_user.id,\n key=\\\"\\\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\\\"\\\"\\\")\n # Manages a GPG key for the current user\n example_user_user_gpg_key = gitlab.UserGpgKey(\"exampleUserUserGpgKey\", key=\\\"\\\"\\\"-----BEGIN PGP PUBLIC KEY BLOCK-----\n ...\n -----END PGP PUBLIC KEY BLOCK-----\\\"\\\"\\\")\n ```\n\n ## Import\n\n You can import a GPG key for a specific user using an id made up of `{user-id}:{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example 42:1\n ```\n\n Alternatively, you can import a GPG key for the current user using an id made up of `{key}`, e.g.\n\n ```sh\n $ pulumi import gitlab:index/userGpgKey:UserGpgKey example_user 1\n ```\n\n :param str resource_name: The name of the resource.\n :param UserGpgKeyArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(UserGpgKeyArgs, pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.__dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n key: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[int]] = None,\n __props__=None):\n opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a 
valid opts.id to get an existing resource')\n __props__ = UserGpgKeyArgs.__new__(UserGpgKeyArgs)\n\n if key is None and not opts.urn:\n raise TypeError(\"Missing required property 'key'\")\n __props__.__dict__[\"key\"] = key\n __props__.__dict__[\"user_id\"] = user_id\n __props__.__dict__[\"created_at\"] = None\n __props__.__dict__[\"key_id\"] = None\n super(UserGpgKey, __self__).__init__(\n 'gitlab:index/userGpgKey:UserGpgKey',\n resource_name,\n __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n key: Optional[pulumi.Input[str]] = None,\n key_id: Optional[pulumi.Input[int]] = None,\n user_id: Optional[pulumi.Input[int]] = None) -> 'UserGpgKey':\n \"\"\"\n Get an existing UserGpgKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] created_at: The time when this key was created in GitLab.\n :param pulumi.Input[str] key: The armored GPG public key.\n :param pulumi.Input[int] key_id: The ID of the GPG key.\n :param pulumi.Input[int] user_id: The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. 
Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _UserGpgKeyState.__new__(_UserGpgKeyState)\n\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"key\"] = key\n __props__.__dict__[\"key_id\"] = key_id\n __props__.__dict__[\"user_id\"] = user_id\n return UserGpgKey(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name=\"createdAt\")\n def created_at(self) -> pulumi.Output[str]:\n \"\"\"\n The time when this key was created in GitLab.\n \"\"\"\n return pulumi.get(self, \"created_at\")\n\n @property\n @pulumi.getter\n def key(self) -> pulumi.Output[str]:\n \"\"\"\n The armored GPG public key.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @property\n @pulumi.getter(name=\"keyId\")\n def key_id(self) -> pulumi.Output[int]:\n \"\"\"\n The ID of the GPG key.\n \"\"\"\n return pulumi.get(self, \"key_id\")\n\n @property\n @pulumi.getter(name=\"userId\")\n def user_id(self) -> pulumi.Output[Optional[int]]:\n \"\"\"\n The ID of the user to add the GPG key to. If this field is omitted, this resource manages a GPG key for the current user. Otherwise, this resource manages a GPG key for the specified user, and an admin token is required.\n \"\"\"\n return pulumi.get(self, \"user_id\")\n\n",
"step-ids": [
11,
19,
22,
26,
29
]
}
|
[
11,
19,
22,
26,
29
] |
from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from django.views.generic import View
from django.contrib.auth import login
from django.contrib.auth.models import User
class RegisterView(View):
    """Display the registration form and create a new ``User`` on submit.

    The submitted email address doubles as the username. On success the
    visitor is redirected to the login page; on any failure the form is
    re-rendered with an ``error`` message exposed to the template (context
    is passed via ``locals()``, matching this module's existing style).
    """

    def get(self, request):
        """Render the empty registration form."""
        return render(request, 'users/register.html', locals())

    def post(self, request):
        """Create the user from POSTed fields, then redirect to login.

        Returns the re-rendered form (HTTP 200) when creation fails —
        e.g. a missing required field or a duplicate email/username.
        """
        email = request.POST.get('email', '').strip()
        password = request.POST.get('password')
        # Reject incomplete submissions before touching the database;
        # previously a missing password produced an unusable account.
        if not email or not password:
            error = 'Email and password are required.'
            return render(request, 'users/register.html', locals())
        try:
            user = User(
                first_name=request.POST.get('first_name', ''),
                last_name=request.POST.get('last_name', ''),
                email=email,
                username=email,  # the email doubles as the login name
            )
            user.set_password(password)  # hashes; never stores plaintext
            user.save()
        except Exception as e:
            # Most likely an IntegrityError for a duplicate username.
            # Surface a message instead of only printing to stdout.
            print(e)
            error = 'Could not create the account; the email may already be in use.'
            return render(request, 'users/register.html', locals())
        return HttpResponseRedirect('/users/login')
class HomeView(View):
def get(self, request):
return HttpResponse(f"Home Page | Logged in as - {request.user}")
|
normal
|
{
"blob_id": "c9191df0fc04818b4df9c93a9479f75a60688aa9",
"index": 6372,
"step-1": "<mask token>\n\n\nclass RegisterView(View):\n <mask token>\n <mask token>\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n",
"step-2": "<mask token>\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n <mask token>\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n",
"step-3": "<mask token>\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'),\n last_name=request.POST.get('last_name'), email=request.POST\n .get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n",
"step-4": "from django.shortcuts import render, HttpResponseRedirect, HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth import login\nfrom django.contrib.auth.models import User\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'),\n last_name=request.POST.get('last_name'), email=request.POST\n .get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n",
"step-5": "from django.shortcuts import render, HttpResponseRedirect, HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth import login\nfrom django.contrib.auth.models import User\n\n\nclass RegisterView(View):\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'), last_name=request.POST.get(\n 'last_name'), email=request.POST.get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n def get(self, request):\n return HttpResponse(f\"Home Page | Logged in as - {request.user}\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# 4 Pillars of OOP:
# 1. Encapsulation: Encapsulation in Python is the process of wrapping up variables and methods into a single entity.In programming, a class is an example that wraps all the variables and methods defined inside it.
# 2. Abstraction: Abstraction in Python is the process of hiding the real implementation of an application from the user and emphasizing only on usage of it.
# 3. Inheritance: It is the process of creating a class that can derive or inherit the properties and methods from another class(parent/base).
# 4. Polymorphism: Polymorphism means the ability to take various forms.
# Encapsulation:
# Encapsulation is a process of protecting the data and functionality of a class in a single unit, called an object.
# This mechanism is often used to protect the data of an object from other objects.
# It’s one of the fundamental principles in any programming language that supports object-oriented programming.
# We can protect the variables in the class by marking them as private. We need to add two underscores as a prefix to make a variable private.
# Once we make a variable as private, we can’t access them directly from the objects of that class.
# Now, let’s see how to create private variables:
# eg:
from abc import abstractmethod, ABC
class House:
def __init__(self, wallDynamic):
self.__wall = wallDynamic
# In the above example, wall is a private variable.
# Once a variable is declared as private, the only way to access those variables is through name mangling.
# In the name mangling process, an identifier with two leading underscores and one trailing underscore is
# textually replaced with _classname__identifier , where class-name is the name of the current class and identifier is the private variable.
house = House(1)
# Using name mangling to access private variables
print(house._House__wall) # OutPut - 1
# To implement proper encapsulation in Python, we need to use setters and getters, as shown below:
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
# Abstraction:
# Abstraction in OOP is a process of hiding the real implementation of the method by only showing a method signature.
# In Python, we can achieve abstraction using ABC(abstraction class) or abstract method.
# ABC is a class from the abc module in Python.
# If we extend any class with ABC and include any abstraction methods,
# then the classes inherited from this class will have to mandatorily implement those abstract methods.
# When we annotate any method with an abstractmethod keyword, then it is an abstract method in Python(it won’t have any method implementation).
# If the parent class has abstractmethod and not inherited from an abstract class, then it is optional to implement the abstractmethod .
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print("Starting engine")
def stop(self):
print("Stopping engine")
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print("Car is in drive mode")
# Here, Vehicle is a parent inherited from ABC class. It has an abstraction method drive.
# Car is another class that is inherited from Vehicle, so it had to implement the drive method.
|
normal
|
{
"blob_id": "0e4c82d6eb77d2b6357925c9aab516bcc3310a4c",
"index": 140,
"step-1": "<mask token>\n\n\nclass House2:\n <mask token>\n <mask token>\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-2": "<mask token>\n\n\nclass House:\n <mask token>\n\n\n<mask token>\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-3": "<mask token>\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n\n<mask token>\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-4": "<mask token>\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n\n<mask token>\nprint(house._House__wall)\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-5": "# 4 Pillars of OOP:\n# 1. Encapsulation: Encapsulation in Python is the process of wrapping up variables and methods into a single entity.In programming, a class is an example that wraps all the variables and methods defined inside it.\n# 2. Abstraction: Abstraction in Python is the process of hiding the real implementation of an application from the user and emphasizing only on usage of it.\n# 3. Inheritance: It is the process of creating a class that can derive or inherit the properties and methods from another class(parent/base).\n# 4. Polymorphism: Polymorphism means the ability to take various forms.\n\n# Encapsulation:\n\n# Encapsulation is a process of protecting the data and functionality of a class in a single unit, called an object.\n# This mechanism is often used to protect the data of an object from other objects.\n# It’s one of the fundamental principles in any programming language that supports object-oriented programming.\n# We can protect the variables in the class by marking them as private. 
We need to add two underscores as a prefix to make a variable private.\n# Once we make a variable as private, we can’t access them directly from the objects of that class.\n# Now, let’s see how to create private variables:\n\n# eg:\nfrom abc import abstractmethod, ABC\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n# In the above example, wall is a private variable.\n# Once a variable is declared as private, the only way to access those variables is through name mangling.\n# In the name mangling process, an identifier with two leading underscores and one trailing underscore is\n# textually replaced with _classname__identifier , where class-name is the name of the current class and identifier is the private variable.\n\n\nhouse = House(1)\n\n# Using name mangling to access private variables\nprint(house._House__wall) # OutPut - 1\n\n# To implement proper encapsulation in Python, we need to use setters and getters, as shown below:\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\n# Abstraction:\n\n# Abstraction in OOP is a process of hiding the real implementation of the method by only showing a method signature.\n# In Python, we can achieve abstraction using ABC(abstraction class) or abstract method.\n# ABC is a class from the abc module in Python.\n# If we extend any class with ABC and include any abstraction methods,\n# then the classes inherited from this class will have to mandatorily implement those abstract methods.\n# When we annotate any method with an abstractmethod keyword, then it is an abstract method in Python(it won’t have any method implementation).\n# If the parent class has abstractmethod and not inherited from an abstract class, then it is optional to implement the abstractmethod .\n\n\nclass Vehicle(ABC):\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print(\"Starting engine\")\n\n 
def stop(self):\n print(\"Stopping engine\")\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print(\"Car is in drive mode\")\n\n\n# Here, Vehicle is a parent inherited from ABC class. It has an abstraction method drive.\n# Car is another class that is inherited from Vehicle, so it had to implement the drive method.\n",
"step-ids": [
9,
12,
13,
14,
17
]
}
|
[
9,
12,
13,
14,
17
] |
import os
from registration import Registration
from login import Login
def login():
""" redirect to login page"""
login_page = Login()
login_page.login_main_page()
def registration():
"""Register the new user"""
registration_page = Registration()
registration_page.registration_main_page()
if __name__ == '__main__':
ch = ''
while ch != 3:
os.system('clear')
print "\t\t\t\t\t\t\t***** MAIN MENU *****\n\n\n"
print "\n\t1. LOGIN \t\t\t\t\t2. REGISTER NEW USER\t\t\t\t\t\t3. EXIT\n"
try:
ch = str(raw_input('\n\n\t\t\t\t\tENTER YOUR RESPONSE :- '))
if ch == '1':
login()
elif ch == '2':
registration()
continue
elif ch == '3':
print("\tThank You !! Visit Again")
break
else:
print("WRONG CHOICE")
os.system('clear')
continue
except NameError:
print("\n\tSelect Your Option between 1 to 3")
ch = str(raw_input("\t\tEnter your choice : "))
except SyntaxError:
print("Select Your Option (1-3)")
ch = str(raw_input("\tEnter your choice : "))
|
normal
|
{
"blob_id": "8ebc11f4b9e28254ef40175b26744f2a5ab0c831",
"index": 2930,
"step-1": "import os\n\nfrom registration import Registration\nfrom login import Login\n\n\ndef login():\n \"\"\" redirect to login page\"\"\"\n login_page = Login()\n login_page.login_main_page()\n\n\ndef registration():\n \"\"\"Register the new user\"\"\"\n registration_page = Registration()\n registration_page.registration_main_page()\n\n\nif __name__ == '__main__':\n ch = ''\n\n while ch != 3:\n os.system('clear')\n print \"\\t\\t\\t\\t\\t\\t\\t***** MAIN MENU *****\\n\\n\\n\"\n print \"\\n\\t1. LOGIN \\t\\t\\t\\t\\t2. REGISTER NEW USER\\t\\t\\t\\t\\t\\t3. EXIT\\n\"\n try:\n ch = str(raw_input('\\n\\n\\t\\t\\t\\t\\tENTER YOUR RESPONSE :- '))\n if ch == '1':\n login()\n elif ch == '2':\n registration()\n continue\n elif ch == '3':\n print(\"\\tThank You !! Visit Again\")\n break\n else:\n print(\"WRONG CHOICE\")\n os.system('clear')\n continue\n\n except NameError:\n print(\"\\n\\tSelect Your Option between 1 to 3\")\n ch = str(raw_input(\"\\t\\tEnter your choice : \"))\n except SyntaxError:\n print(\"Select Your Option (1-3)\")\n ch = str(raw_input(\"\\tEnter your choice : \"))\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
@app.route('/probability', methods=['POST'])
def make_probability():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in probability: text is missing')
return responses
try:
cls = data['classifier']
except:
responses = jsonify('Error in probability: classifier is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print('Computing probability of having content related to ' + cls)
probability = probability_terror(text, lang, cls)
responses = jsonify(probability=probability)
responses.status_code = 200
return responses
@app.route('/analyze', methods=['POST'])
def make_analyze():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in analyze: text is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(message=
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
filename = os.path.join(os.path.dirname(__file__),
'models-registry.json')
registry = load_data(filename)
analysis = analyze(text, lang, registry)
responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],
topics=analysis[2])
responses.status_code = 200
return responses
@app.route('/terms', methods=['POST'])
def make_terms():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
texts = data['dataset']
print('Suggesting new terms for search...')
terms = new_terms(texts)
responses = jsonify(message='Suggested new terms for search: ',
terms=list(terms))
responses.status_code = 200
return responses
@app.route('/sento', methods=['POST'])
def make_sento():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in sento: text is missing')
return responses
try:
cls = data['classifier']
except:
responses = jsonify('Error in sento: classifier is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print('Sento analysis')
probability = probability_terror(text, lang, cls)
print(probability)
filename = os.path.join(os.path.dirname(__file__),
'models-registry.json')
registry = load_data(filename)
analysis = analyze(text, lang, registry)
data_audit = {'auditEventType': 'Start task', 'details': {'sento':
'NLP analysis'}, 'principal': 'Analyst'}
datajson = json.dumps(data_audit)
results_audit = audit(datajson)
responses = jsonify(probability=probability, concepts=analysis[0],
key_ideas=analysis[1], topics=analysis[2])
responses.status_code = 200
return responses
@app.route('/classifier', methods=['POST'])
def make_classifier():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request('There is no data for the training')
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
annotated_data = data['annotated_data']
except:
responses = jsonify(
'Error in classifier: annotated data is missing')
return responses
try:
user_id = data['user_id']
except:
responses = jsonify('Error in classifier: user_id is missing')
return responses
try:
case_id = data['case_id']
except:
responses = jsonify('Error in classifier: case_id is missing')
return responses
try:
clas_name = data['clas_name']
except:
responses = jsonify(
'Error in classifier: classifier name is missing')
return responses
print(len(annotated_data))
if len(annotated_data) < 22:
responses = jsonify(
'Training data set should have more than 10 samples per each class'
)
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print("Training a new classifier from the user's annotated dataset ")
accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name
)
data_audit = {'auditEventType': 'Start task', 'details': {
'classifier':
'Trains a new classifier based on the annotations provided by the user'
}, 'principal': 'Analyst'}
datajson = json.dumps(data_audit)
results_audit = audit(datajson)
responses = jsonify(message=
'Classifier has been saved. Accuracy given in % - calculated using C-10V'
, accuracy=accuracy)
responses.status_code = 200
return responses
@app.route('/claslisting', methods=['POST'])
def make_claslisting():
user_id = None
case_id = None
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
user_id = data['user_id']
except:
responses = jsonify(message=
'Error in classifiers listing: user_id is missing')
return responses
try:
case_id = data['case_id']
except:
responses = jsonify(message=
'Error in classifiers listing: case_id is missing')
return responses
available_classifiers = claslisting(user_id, case_id)
data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':
'Lists the available classifiers'}, 'principal': 'Analyst'}
datajson = json.dumps(data_audit)
results_audit = audit(datajson)
responses = jsonify(available_classifiers=available_classifiers)
responses.status_code = 200
return responses
@app.route('/my400')
def bad_request(msg=''):
code = 400
if msg == '':
msg = 'Error'
return msg, code
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/vectorize', methods=['POST'])
def make_vectorize():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in vectorize: text is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print('Vectorize...')
embeddings = Embeddings(emb_dict[lang])
processed_text = preprocess(text)
no_stpw_text = remove_stopwords(processed_text, lang)
vectorized_tokens = to_vector_single_nonzeros(no_stpw_text,
embeddings, len(no_stpw_text))
if len(vectorized_tokens) > 0:
vectorized_text = np.mean(vectorized_tokens, axis=0)
else:
vectorized_text = np.zeros((300,) * 1)
print(vectorized_text)
responses = jsonify(vector=vectorized_text.tolist())
responses.status_code = 200
return responses
@app.route('/probability', methods=['POST'])
def make_probability():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in probability: text is missing')
return responses
try:
cls = data['classifier']
except:
responses = jsonify('Error in probability: classifier is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print('Computing probability of having content related to ' + cls)
probability = probability_terror(text, lang, cls)
responses = jsonify(probability=probability)
responses.status_code = 200
return responses
@app.route('/analyze', methods=['POST'])
def make_analyze():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in analyze: text is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(message=
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
filename = os.path.join(os.path.dirname(__file__),
'models-registry.json')
registry = load_data(filename)
analysis = analyze(text, lang, registry)
responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],
topics=analysis[2])
responses.status_code = 200
return responses
@app.route('/terms', methods=['POST'])
def make_terms():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
texts = data['dataset']
print('Suggesting new terms for search...')
terms = new_terms(texts)
responses = jsonify(message='Suggested new terms for search: ',
terms=list(terms))
responses.status_code = 200
return responses
@app.route('/sento', methods=['POST'])
def make_sento():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in sento: text is missing')
return responses
try:
cls = data['classifier']
except:
responses = jsonify('Error in sento: classifier is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print('Sento analysis')
probability = probability_terror(text, lang, cls)
print(probability)
filename = os.path.join(os.path.dirname(__file__),
'models-registry.json')
registry = load_data(filename)
analysis = analyze(text, lang, registry)
data_audit = {'auditEventType': 'Start task', 'details': {'sento':
'NLP analysis'}, 'principal': 'Analyst'}
datajson = json.dumps(data_audit)
results_audit = audit(datajson)
responses = jsonify(probability=probability, concepts=analysis[0],
key_ideas=analysis[1], topics=analysis[2])
responses.status_code = 200
return responses
@app.route('/classifier', methods=['POST'])
def make_classifier():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request('There is no data for the training')
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
annotated_data = data['annotated_data']
except:
responses = jsonify(
'Error in classifier: annotated data is missing')
return responses
try:
user_id = data['user_id']
except:
responses = jsonify('Error in classifier: user_id is missing')
return responses
try:
case_id = data['case_id']
except:
responses = jsonify('Error in classifier: case_id is missing')
return responses
try:
clas_name = data['clas_name']
except:
responses = jsonify(
'Error in classifier: classifier name is missing')
return responses
print(len(annotated_data))
if len(annotated_data) < 22:
responses = jsonify(
'Training data set should have more than 10 samples per each class'
)
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print("Training a new classifier from the user's annotated dataset ")
accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name
)
data_audit = {'auditEventType': 'Start task', 'details': {
'classifier':
'Trains a new classifier based on the annotations provided by the user'
}, 'principal': 'Analyst'}
datajson = json.dumps(data_audit)
results_audit = audit(datajson)
responses = jsonify(message=
'Classifier has been saved. Accuracy given in % - calculated using C-10V'
, accuracy=accuracy)
responses.status_code = 200
return responses
@app.route('/claslisting', methods=['POST'])
def make_claslisting():
user_id = None
case_id = None
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
user_id = data['user_id']
except:
responses = jsonify(message=
'Error in classifiers listing: user_id is missing')
return responses
try:
case_id = data['case_id']
except:
responses = jsonify(message=
'Error in classifiers listing: case_id is missing')
return responses
available_classifiers = claslisting(user_id, case_id)
data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':
'Lists the available classifiers'}, 'principal': 'Analyst'}
datajson = json.dumps(data_audit)
results_audit = audit(datajson)
responses = jsonify(available_classifiers=available_classifiers)
responses.status_code = 200
return responses
@app.route('/my400')
def bad_request(msg=''):
code = 400
if msg == '':
msg = 'Error'
return msg, code
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
emb_dict = {'en': 'embedding-EN', 'ar': 'embedding-AR', 'es':
'embedding-ES', 'ro': 'embedding-RO', 'fr': 'embedding-FR'}
@app.route('/vectorize', methods=['POST'])
def make_vectorize():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in vectorize: text is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print('Vectorize...')
embeddings = Embeddings(emb_dict[lang])
processed_text = preprocess(text)
no_stpw_text = remove_stopwords(processed_text, lang)
vectorized_tokens = to_vector_single_nonzeros(no_stpw_text,
embeddings, len(no_stpw_text))
if len(vectorized_tokens) > 0:
vectorized_text = np.mean(vectorized_tokens, axis=0)
else:
vectorized_text = np.zeros((300,) * 1)
print(vectorized_text)
responses = jsonify(vector=vectorized_text.tolist())
responses.status_code = 200
return responses
@app.route('/probability', methods=['POST'])
def make_probability():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in probability: text is missing')
return responses
try:
cls = data['classifier']
except:
responses = jsonify('Error in probability: classifier is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
print('Computing probability of having content related to ' + cls)
probability = probability_terror(text, lang, cls)
responses = jsonify(probability=probability)
responses.status_code = 200
return responses
@app.route('/analyze', methods=['POST'])
def make_analyze():
try:
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return bad_request()
else:
try:
lang = data['lang']
except:
try:
lang = detect_language(data['text'])
print(lang)
except:
responses = jsonify(
'Error in vectorize: language field is missing')
return responses
try:
text = data['text']
except:
responses = jsonify('Error in analyze: text is missing')
return responses
if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
responses = jsonify(message=
"Language not available. Language must be in ['en','es','ar','ro','fr']"
)
return responses
filename = os.path.join(os.path.dirname(__file__),
'models-registry.json')
registry = load_data(filename)
analysis = analyze(text, lang, registry)
responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],
topics=analysis[2])
responses.status_code = 200
return responses
@app.route('/terms', methods=['POST'])
def make_terms():
    """Suggest new search terms from a dataset of texts.

    Expects JSON: {"dataset": [...texts...]}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    try:
        texts = data['dataset']
    except KeyError:
        # Previously an unhandled KeyError produced a 500 here.
        responses = jsonify('Error in terms: dataset is missing')
        responses.status_code = 400
        return responses
    print('Suggesting new terms for search...')
    terms = new_terms(texts)
    responses = jsonify(message='Suggested new terms for search: ',
                        terms=list(terms))
    responses.status_code = 200
    return responses
@app.route('/sento', methods=['POST'])
def make_sento():
    """Full analysis: class probability plus concepts/key ideas/topics.

    Also records a 'Start task' event in the audit trail.
    Expects JSON: {"text": ..., "classifier": ..., "lang": optional}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in sento: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']
    except KeyError:
        responses = jsonify('Error in sento: text is missing')
        responses.status_code = 400
        return responses
    try:
        cls = data['classifier']
    except KeyError:
        responses = jsonify('Error in sento: classifier is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print('Sento analysis')
    probability = probability_terror(text, lang, cls)
    print(probability)
    filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
    registry = load_data(filename)
    analysis = analyze(text, lang, registry)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'sento': 'NLP analysis'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(probability=probability, concepts=analysis[0],
                        key_ideas=analysis[1], topics=analysis[2])
    responses.status_code = 200
    return responses
@app.route('/classifier', methods=['POST'])
def make_classifier():
    """Train and persist a new classifier from user-annotated data.

    Returns the 10-fold cross-validated accuracy (%). Expects JSON:
    {"annotated_data": ..., "user_id": ..., "case_id": ...,
     "clas_name": ..., "lang": optional}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request('There is no data for the training')
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in classifier: language field is missing')
            responses.status_code = 400
            return responses
    try:
        annotated_data = data['annotated_data']
    except KeyError:
        responses = jsonify('Error in classifier: annotated data is missing')
        responses.status_code = 400
        return responses
    try:
        user_id = data['user_id']
    except KeyError:
        responses = jsonify('Error in classifier: user_id is missing')
        responses.status_code = 400
        return responses
    try:
        case_id = data['case_id']
    except KeyError:
        responses = jsonify('Error in classifier: case_id is missing')
        responses.status_code = 400
        return responses
    try:
        clas_name = data['clas_name']
    except KeyError:
        responses = jsonify('Error in classifier: classifier name is missing')
        responses.status_code = 400
        return responses
    print(len(annotated_data))
    # Minimum dataset size check (threshold of 22 samples overall).
    if len(annotated_data) < 22:
        responses = jsonify(
            'Training data set should have more than 10 samples per each class')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print("Training a new classifier from the user's annotated dataset ")
    accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'classifier':
                      'Trains a new classifier based on the annotations provided by the user'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(message=
        'Classifier has been saved. Accuracy given in % - calculated using C-10V',
        accuracy=accuracy)
    responses.status_code = 200
    return responses
@app.route('/claslisting', methods=['POST'])
def make_claslisting():
    """List the classifiers available for a given user and case.

    Expects JSON: {"user_id": ..., "case_id": ...}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    try:
        user_id = data['user_id']
    except KeyError:
        responses = jsonify(message='Error in classifiers listing: user_id is missing')
        responses.status_code = 400
        return responses
    try:
        case_id = data['case_id']
    except KeyError:
        responses = jsonify(message='Error in classifiers listing: case_id is missing')
        responses.status_code = 400
        return responses
    available_classifiers = claslisting(user_id, case_id)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'claslisting': 'Lists the available classifiers'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(available_classifiers=available_classifiers)
    responses.status_code = 200
    return responses
@app.route('/my400')
def bad_request(msg=''):
    """Return a plain-text 400 response; defaults to 'Error' when no
    message is supplied."""
    if not msg:
        msg = 'Error'
    return msg, 400
# Development entry point: serve on all interfaces, port 5000.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
<|reserved_special_token_1|>
import pandas as pd
import pickle
from flask import Flask, render_template, request, jsonify
from utilities import load_data, detect_language
from preprocessing import preprocess, Tagger, remove_stopwords
import json
from gensim.models import KeyedVectors
from Embeddings import Embeddings, to_vector_single, to_vector_single_nonzeros
import numpy as np
import os
from analysis import analyze
from probability_terror import probability_terror
from new_terms_no_lang import new_terms
from classifier import classifier
from claslisting import claslisting
from audit import audit
# Flask application and the per-language word-embedding model names.
app = Flask(__name__)
emb_dict = {'en': 'embedding-EN', 'ar': 'embedding-AR', 'es':
    'embedding-ES', 'ro': 'embedding-RO', 'fr': 'embedding-FR'}
@app.route('/vectorize', methods=['POST'])
def make_vectorize():
    """Return the mean word-embedding vector (300-dim) of a text.

    Expects JSON: {"text": ..., "lang": optional}; 'lang' is auto-detected
    when missing.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in vectorize: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']
    except KeyError:
        responses = jsonify('Error in vectorize: text is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print('Vectorize...')
    embeddings = Embeddings(emb_dict[lang])
    processed_text = preprocess(text)
    no_stpw_text = remove_stopwords(processed_text, lang)
    vectorized_tokens = to_vector_single_nonzeros(no_stpw_text, embeddings,
                                                  len(no_stpw_text))
    # Mean-pool the token vectors; fall back to a zero vector when no
    # token had a nonzero embedding.
    if len(vectorized_tokens) > 0:
        vectorized_text = np.mean(vectorized_tokens, axis=0)
    else:
        vectorized_text = np.zeros(300)
    print(vectorized_text)
    responses = jsonify(vector=vectorized_text.tolist())
    responses.status_code = 200
    return responses
@app.route('/probability', methods=['POST'])
def make_probability():
    """Return the probability that the text relates to the classifier's class.

    Expects JSON: {"text": ..., "classifier": ..., "lang": optional}.
    When 'lang' is absent the language is auto-detected.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in probability: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']
    except KeyError:
        responses = jsonify('Error in probability: text is missing')
        responses.status_code = 400
        return responses
    try:
        cls = data['classifier']
    except KeyError:
        responses = jsonify('Error in probability: classifier is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print('Computing probability of having content related to ' + cls)
    probability = probability_terror(text, lang, cls)
    responses = jsonify(probability=probability)
    responses.status_code = 200
    return responses
@app.route('/analyze', methods=['POST'])
def make_analyze():
    """Run the NLP analysis (concepts, key ideas, topics) over a text.

    Expects JSON: {"text": ..., "lang": optional}; 'lang' is auto-detected
    when missing.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in analyze: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']
    except KeyError:
        responses = jsonify('Error in analyze: text is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(message=
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    # The registry maps languages to the models used by analyze().
    filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
    registry = load_data(filename)
    analysis = analyze(text, lang, registry)
    responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],
                        topics=analysis[2])
    responses.status_code = 200
    return responses
@app.route('/terms', methods=['POST'])
def make_terms():
    """Suggest new search terms from a dataset of texts.

    Expects JSON: {"dataset": [...texts...]}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    try:
        texts = data['dataset']
    except KeyError:
        # Previously an unhandled KeyError produced a 500 here.
        responses = jsonify('Error in terms: dataset is missing')
        responses.status_code = 400
        return responses
    print('Suggesting new terms for search...')
    terms = new_terms(texts)
    responses = jsonify(message='Suggested new terms for search: ',
                        terms=list(terms))
    responses.status_code = 200
    return responses
@app.route('/sento', methods=['POST'])
def make_sento():
    """Full analysis: class probability plus concepts/key ideas/topics.

    Also records a 'Start task' event in the audit trail.
    Expects JSON: {"text": ..., "classifier": ..., "lang": optional}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in sento: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']
    except KeyError:
        responses = jsonify('Error in sento: text is missing')
        responses.status_code = 400
        return responses
    try:
        cls = data['classifier']
    except KeyError:
        responses = jsonify('Error in sento: classifier is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print('Sento analysis')
    probability = probability_terror(text, lang, cls)
    print(probability)
    filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
    registry = load_data(filename)
    analysis = analyze(text, lang, registry)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'sento': 'NLP analysis'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(probability=probability, concepts=analysis[0],
                        key_ideas=analysis[1], topics=analysis[2])
    responses.status_code = 200
    return responses
@app.route('/classifier', methods=['POST'])
def make_classifier():
    """Train and persist a new classifier from user-annotated data.

    Returns the 10-fold cross-validated accuracy (%). Expects JSON:
    {"annotated_data": ..., "user_id": ..., "case_id": ...,
     "clas_name": ..., "lang": optional}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request('There is no data for the training')
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in classifier: language field is missing')
            responses.status_code = 400
            return responses
    try:
        annotated_data = data['annotated_data']
    except KeyError:
        responses = jsonify('Error in classifier: annotated data is missing')
        responses.status_code = 400
        return responses
    try:
        user_id = data['user_id']
    except KeyError:
        responses = jsonify('Error in classifier: user_id is missing')
        responses.status_code = 400
        return responses
    try:
        case_id = data['case_id']
    except KeyError:
        responses = jsonify('Error in classifier: case_id is missing')
        responses.status_code = 400
        return responses
    try:
        clas_name = data['clas_name']
    except KeyError:
        responses = jsonify('Error in classifier: classifier name is missing')
        responses.status_code = 400
        return responses
    print(len(annotated_data))
    # Minimum dataset size check (threshold of 22 samples overall).
    if len(annotated_data) < 22:
        responses = jsonify(
            'Training data set should have more than 10 samples per each class')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print("Training a new classifier from the user's annotated dataset ")
    accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'classifier':
                      'Trains a new classifier based on the annotations provided by the user'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(message=
        'Classifier has been saved. Accuracy given in % - calculated using C-10V',
        accuracy=accuracy)
    responses.status_code = 200
    return responses
@app.route('/claslisting', methods=['POST'])
def make_claslisting():
    """List the classifiers available for a given user and case.

    Expects JSON: {"user_id": ..., "case_id": ...}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    try:
        user_id = data['user_id']
    except KeyError:
        responses = jsonify(message='Error in classifiers listing: user_id is missing')
        responses.status_code = 400
        return responses
    try:
        case_id = data['case_id']
    except KeyError:
        responses = jsonify(message='Error in classifiers listing: case_id is missing')
        responses.status_code = 400
        return responses
    available_classifiers = claslisting(user_id, case_id)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'claslisting': 'Lists the available classifiers'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(available_classifiers=available_classifiers)
    responses.status_code = 200
    return responses
@app.route('/my400')
def bad_request(msg=''):
    """Return a plain-text 400 response; defaults to 'Error' when no
    message is supplied."""
    if not msg:
        msg = 'Error'
    return msg, 400
# Development entry point: serve on all interfaces, port 5000.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
<|reserved_special_token_1|>
# POST API for Red Alert project - NLP and Metalearning components
# Insikt Intelligence S.L. 2019
import pandas as pd
import pickle
from flask import Flask, render_template, request, jsonify
from utilities import load_data, detect_language
from preprocessing import preprocess, Tagger, remove_stopwords
import json
from gensim.models import KeyedVectors
from Embeddings import Embeddings, to_vector_single, to_vector_single_nonzeros
import numpy as np
import os
from analysis import analyze
from probability_terror import probability_terror
from new_terms_no_lang import new_terms
from classifier import classifier
from claslisting import claslisting
from audit import audit
# Flask application and the per-language word-embedding model names.
app = Flask(__name__)
emb_dict = {"en": "embedding-EN", "ar": "embedding-AR", "es": "embedding-ES", "ro": "embedding-RO","fr": "embedding-FR"}
@app.route('/vectorize', methods=['POST'])
def make_vectorize():
    """Return the mean word-embedding vector (300-dim) of a text.

    Expects JSON: {"text": ..., "lang": optional}; 'lang' is auto-detected
    when missing.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in vectorize: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']
    except KeyError:
        responses = jsonify('Error in vectorize: text is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print('Vectorize...')
    embeddings = Embeddings(emb_dict[lang])
    processed_text = preprocess(text)
    no_stpw_text = remove_stopwords(processed_text, lang)
    vectorized_tokens = to_vector_single_nonzeros(no_stpw_text, embeddings,
                                                  len(no_stpw_text))
    # Mean-pool the token vectors; fall back to a zero vector when no
    # token had a nonzero embedding.
    if len(vectorized_tokens) > 0:
        vectorized_text = np.mean(vectorized_tokens, axis=0)
    else:
        vectorized_text = np.zeros(300)
    print(vectorized_text)
    responses = jsonify(vector=vectorized_text.tolist())
    responses.status_code = 200
    return responses
@app.route('/probability', methods=['POST'])
def make_probability():
    """Return the probability that the text relates to the classifier's class.

    Expects JSON: {"text": ..., "classifier": ..., "lang": optional}.
    When 'lang' is absent the language is auto-detected.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in probability: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']
    except KeyError:
        responses = jsonify('Error in probability: text is missing')
        responses.status_code = 400
        return responses
    try:
        cls = data['classifier']
    except KeyError:
        responses = jsonify('Error in probability: classifier is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print('Computing probability of having content related to ' + cls)
    probability = probability_terror(text, lang, cls)
    responses = jsonify(probability=probability)
    responses.status_code = 200
    return responses
@app.route('/analyze', methods=['POST'])
def make_analyze():
    """Run the NLP analysis (concepts, key ideas, topics) over a text.

    Expects JSON: {"text": ..., "lang": optional}; 'lang' is auto-detected
    when missing.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in analyze: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']  # assumed already tokenized by the caller
    except KeyError:
        responses = jsonify('Error in analyze: text is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(message=
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    # The registry maps languages to the models used by analyze().
    filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
    registry = load_data(filename)
    analysis = analyze(text, lang, registry)
    responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],
                        topics=analysis[2])
    responses.status_code = 200
    return responses
@app.route('/terms', methods=['POST'])
def make_terms():
    """Suggest new search terms from a dataset of texts.

    Expects JSON: {"dataset": [...texts...]}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    try:
        texts = data['dataset']
    except KeyError:
        # Previously an unhandled KeyError produced a 500 here.
        responses = jsonify('Error in terms: dataset is missing')
        responses.status_code = 400
        return responses
    print('Suggesting new terms for search...')
    terms = new_terms(texts)
    responses = jsonify(message='Suggested new terms for search: ',
                        terms=list(terms))
    responses.status_code = 200
    return responses
@app.route('/sento', methods=['POST'])
def make_sento():
    """Full analysis: class probability plus concepts/key ideas/topics.

    Also records a 'Start task' event in the audit trail.
    Expects JSON: {"text": ..., "classifier": ..., "lang": optional}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in sento: language field is missing')
            responses.status_code = 400
            return responses
    try:
        text = data['text']
    except KeyError:
        responses = jsonify('Error in sento: text is missing')
        responses.status_code = 400
        return responses
    try:
        cls = data['classifier']
    except KeyError:
        responses = jsonify('Error in sento: classifier is missing')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print('Sento analysis')
    probability = probability_terror(text, lang, cls)
    print(probability)
    filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
    registry = load_data(filename)
    analysis = analyze(text, lang, registry)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'sento': 'NLP analysis'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(probability=probability, concepts=analysis[0],
                        key_ideas=analysis[1], topics=analysis[2])
    responses.status_code = 200
    return responses
@app.route('/classifier', methods=['POST'])
def make_classifier():
    """Train and persist a new classifier from user-annotated data.

    Returns the 10-fold cross-validated accuracy (%). Expects JSON:
    {"annotated_data": ..., "user_id": ..., "case_id": ...,
     "clas_name": ..., "lang": optional}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request('There is no data for the training')
    # Resolve the language: explicit field first, else auto-detect.
    try:
        lang = data['lang']
    except KeyError:
        try:
            lang = detect_language(data['text'])
            print(lang)
        except Exception:
            responses = jsonify('Error in classifier: language field is missing')
            responses.status_code = 400
            return responses
    try:
        annotated_data = data['annotated_data']
    except KeyError:
        responses = jsonify('Error in classifier: annotated data is missing')
        responses.status_code = 400
        return responses
    try:
        user_id = data['user_id']
    except KeyError:
        responses = jsonify('Error in classifier: user_id is missing')
        responses.status_code = 400
        return responses
    try:
        case_id = data['case_id']
    except KeyError:
        responses = jsonify('Error in classifier: case_id is missing')
        responses.status_code = 400
        return responses
    try:
        clas_name = data['clas_name']
    except KeyError:
        responses = jsonify('Error in classifier: classifier name is missing')
        responses.status_code = 400
        return responses
    print(len(annotated_data))
    # Minimum dataset size check (threshold of 22 samples overall).
    if len(annotated_data) < 22:
        responses = jsonify(
            'Training data set should have more than 10 samples per each class')
        responses.status_code = 400
        return responses
    if lang not in ['en', 'es', 'ar', 'ro', 'fr']:
        responses = jsonify(
            "Language not available. Language must be in ['en','es','ar','ro','fr']")
        responses.status_code = 400
        return responses
    print("Training a new classifier from the user's annotated dataset ")
    accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'classifier':
                      'Trains a new classifier based on the annotations provided by the user'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(message=
        'Classifier has been saved. Accuracy given in % - calculated using C-10V',
        accuracy=accuracy)
    responses.status_code = 200
    return responses
@app.route('/claslisting', methods=['POST'])
def make_claslisting():
    """List the classifiers available for a given user and case.

    Expects JSON: {"user_id": ..., "case_id": ...}.
    """
    data = request.get_json()  # may return None when no JSON body is sent
    if not data:
        return bad_request()
    try:
        user_id = data['user_id']
    except KeyError:
        responses = jsonify(message='Error in classifiers listing: user_id is missing')
        responses.status_code = 400
        return responses
    try:
        case_id = data['case_id']
    except KeyError:
        responses = jsonify(message='Error in classifiers listing: case_id is missing')
        responses.status_code = 400
        return responses
    available_classifiers = claslisting(user_id, case_id)
    # Record the task in the audit trail.
    data_audit = {'auditEventType': 'Start task',
                  'details': {'claslisting': 'Lists the available classifiers'},
                  'principal': 'Analyst'}
    results_audit = audit(json.dumps(data_audit))
    responses = jsonify(available_classifiers=available_classifiers)
    responses.status_code = 200
    return responses
@app.route('/my400')
def bad_request(msg=''):
    """Return a plain-text 400 response; defaults to 'Error' when no
    message is supplied."""
    if not msg:
        msg = 'Error'
    return msg, 400
# Development entry point: serve on all interfaces, port 5000.
if __name__ == '__main__':
    #app.run()
    app.run(host='0.0.0.0',port=5000)
|
flexible
|
{
"blob_id": "b51e0ee80a2488197470627821204d1f74cd62a1",
"index": 5437,
"step-1": "<mask token>\n\n\n@app.route('/probability', methods=['POST'])\ndef make_probability():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in probability: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in probability: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Computing probability of having content related to ' + cls)\n probability = probability_terror(text, lang, cls)\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\n@app.route('/analyze', methods=['POST'])\ndef make_analyze():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in analyze: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(message=\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],\n topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/terms', methods=['POST'])\ndef make_terms():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n texts = data['dataset']\n print('Suggesting new terms for search...')\n terms = new_terms(texts)\n responses = jsonify(message='Suggested new terms for search: ',\n terms=list(terms))\n responses.status_code = 200\n return responses\n\n\n@app.route('/sento', methods=['POST'])\ndef make_sento():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in sento: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in sento: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Sento analysis')\n probability = probability_terror(text, lang, cls)\n print(probability)\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n data_audit = {'auditEventType': 'Start task', 'details': {'sento':\n 'NLP analysis'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(probability=probability, concepts=analysis[0],\n key_ideas=analysis[1], topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/classifier', methods=['POST'])\ndef make_classifier():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request('There is no data for the training')\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses = jsonify(\n 'Error in classifier: annotated data is missing')\n return responses\n try:\n user_id = data['user_id']\n except:\n responses = jsonify('Error in classifier: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify('Error in classifier: case_id is missing')\n return responses\n try:\n clas_name = data['clas_name']\n except:\n responses = jsonify(\n 'Error in classifier: classifier name is missing')\n return responses\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses = jsonify(\n 'Training data set should have more than 10 samples per each class'\n )\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print(\"Training a new classifier from the user's annotated dataset \")\n accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name\n )\n data_audit = {'auditEventType': 'Start task', 'details': {\n 'classifier':\n 'Trains a new classifier based on the annotations provided by the user'\n }, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(message=\n 'Classifier has been saved. Accuracy given in % - calculated using C-10V'\n , accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n\n@app.route('/claslisting', methods=['POST'])\ndef make_claslisting():\n user_id = None\n case_id = None\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n user_id = data['user_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: case_id is missing')\n return responses\n available_classifiers = claslisting(user_id, case_id)\n data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':\n 'Lists the available classifiers'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n\n\n@app.route('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg == '':\n msg = 'Error'\n return msg, code\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/vectorize', methods=['POST'])\ndef make_vectorize():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in vectorize: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Vectorize...')\n embeddings = Embeddings(emb_dict[lang])\n processed_text = preprocess(text)\n no_stpw_text = remove_stopwords(processed_text, lang)\n vectorized_tokens = to_vector_single_nonzeros(no_stpw_text,\n embeddings, len(no_stpw_text))\n if len(vectorized_tokens) > 0:\n vectorized_text = np.mean(vectorized_tokens, axis=0)\n else:\n vectorized_text = np.zeros((300,) * 1)\n print(vectorized_text)\n responses = jsonify(vector=vectorized_text.tolist())\n responses.status_code = 200\n return responses\n\n\n@app.route('/probability', methods=['POST'])\ndef make_probability():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in probability: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in probability: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Computing probability of having content related to ' + cls)\n probability = probability_terror(text, lang, cls)\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\n@app.route('/analyze', methods=['POST'])\ndef make_analyze():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in analyze: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(message=\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],\n topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/terms', methods=['POST'])\ndef make_terms():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n texts = data['dataset']\n print('Suggesting new terms for search...')\n terms = new_terms(texts)\n responses = jsonify(message='Suggested new terms for search: ',\n terms=list(terms))\n responses.status_code = 200\n return responses\n\n\n@app.route('/sento', methods=['POST'])\ndef make_sento():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 
'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in sento: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in sento: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Sento analysis')\n probability = probability_terror(text, lang, cls)\n print(probability)\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n data_audit = {'auditEventType': 'Start task', 'details': {'sento':\n 'NLP analysis'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(probability=probability, concepts=analysis[0],\n key_ideas=analysis[1], topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/classifier', methods=['POST'])\ndef make_classifier():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request('There is no data for the training')\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses = jsonify(\n 'Error in classifier: annotated data is missing')\n return responses\n try:\n user_id = data['user_id']\n except:\n responses = jsonify('Error in classifier: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify('Error in classifier: case_id is missing')\n return responses\n try:\n clas_name = data['clas_name']\n except:\n responses = jsonify(\n 'Error 
in classifier: classifier name is missing')\n return responses\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses = jsonify(\n 'Training data set should have more than 10 samples per each class'\n )\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print(\"Training a new classifier from the user's annotated dataset \")\n accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name\n )\n data_audit = {'auditEventType': 'Start task', 'details': {\n 'classifier':\n 'Trains a new classifier based on the annotations provided by the user'\n }, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(message=\n 'Classifier has been saved. Accuracy given in % - calculated using C-10V'\n , accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n\n@app.route('/claslisting', methods=['POST'])\ndef make_claslisting():\n user_id = None\n case_id = None\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n user_id = data['user_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: case_id is missing')\n return responses\n available_classifiers = claslisting(user_id, case_id)\n data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':\n 'Lists the available classifiers'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n\n\n@app.route('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg == '':\n msg = 'Error'\n 
return msg, code\n\n\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\nemb_dict = {'en': 'embedding-EN', 'ar': 'embedding-AR', 'es':\n 'embedding-ES', 'ro': 'embedding-RO', 'fr': 'embedding-FR'}\n\n\n@app.route('/vectorize', methods=['POST'])\ndef make_vectorize():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in vectorize: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Vectorize...')\n embeddings = Embeddings(emb_dict[lang])\n processed_text = preprocess(text)\n no_stpw_text = remove_stopwords(processed_text, lang)\n vectorized_tokens = to_vector_single_nonzeros(no_stpw_text,\n embeddings, len(no_stpw_text))\n if len(vectorized_tokens) > 0:\n vectorized_text = np.mean(vectorized_tokens, axis=0)\n else:\n vectorized_text = np.zeros((300,) * 1)\n print(vectorized_text)\n responses = jsonify(vector=vectorized_text.tolist())\n responses.status_code = 200\n return responses\n\n\n@app.route('/probability', methods=['POST'])\ndef make_probability():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in probability: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in probability: classifier is 
missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Computing probability of having content related to ' + cls)\n probability = probability_terror(text, lang, cls)\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\n@app.route('/analyze', methods=['POST'])\ndef make_analyze():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in analyze: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(message=\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],\n topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/terms', methods=['POST'])\ndef make_terms():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n texts = data['dataset']\n print('Suggesting new terms for search...')\n terms = new_terms(texts)\n responses = jsonify(message='Suggested new terms for search: ',\n terms=list(terms))\n responses.status_code = 200\n return responses\n\n\n@app.route('/sento', methods=['POST'])\ndef make_sento():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in sento: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in sento: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Sento analysis')\n probability = probability_terror(text, lang, cls)\n print(probability)\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n data_audit = {'auditEventType': 'Start task', 'details': {'sento':\n 'NLP analysis'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(probability=probability, concepts=analysis[0],\n key_ideas=analysis[1], topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/classifier', methods=['POST'])\ndef make_classifier():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request('There is no data for the training')\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses = jsonify(\n 'Error in classifier: annotated data is missing')\n return responses\n try:\n user_id = data['user_id']\n except:\n responses = jsonify('Error in classifier: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify('Error in classifier: case_id is missing')\n return responses\n try:\n clas_name = data['clas_name']\n except:\n responses = jsonify(\n 'Error in classifier: classifier name is missing')\n return responses\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses = jsonify(\n 'Training data set should have more than 10 samples per each class'\n )\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print(\"Training a new classifier from the user's annotated dataset \")\n accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name\n )\n data_audit = {'auditEventType': 'Start task', 'details': {\n 'classifier':\n 'Trains a new classifier based on the annotations provided by the user'\n }, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(message=\n 'Classifier has been saved. Accuracy given in % - calculated using C-10V'\n , accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n\n@app.route('/claslisting', methods=['POST'])\ndef make_claslisting():\n user_id = None\n case_id = None\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n user_id = data['user_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: case_id is missing')\n return responses\n available_classifiers = claslisting(user_id, case_id)\n data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':\n 'Lists the available classifiers'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n\n\n@app.route('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg == '':\n msg = 'Error'\n return msg, code\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n",
"step-4": "import pandas as pd\nimport pickle\nfrom flask import Flask, render_template, request, jsonify\nfrom utilities import load_data, detect_language\nfrom preprocessing import preprocess, Tagger, remove_stopwords\nimport json\nfrom gensim.models import KeyedVectors\nfrom Embeddings import Embeddings, to_vector_single, to_vector_single_nonzeros\nimport numpy as np\nimport os\nfrom analysis import analyze\nfrom probability_terror import probability_terror\nfrom new_terms_no_lang import new_terms\nfrom classifier import classifier\nfrom claslisting import claslisting\nfrom audit import audit\napp = Flask(__name__)\nemb_dict = {'en': 'embedding-EN', 'ar': 'embedding-AR', 'es':\n 'embedding-ES', 'ro': 'embedding-RO', 'fr': 'embedding-FR'}\n\n\n@app.route('/vectorize', methods=['POST'])\ndef make_vectorize():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in vectorize: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Vectorize...')\n embeddings = Embeddings(emb_dict[lang])\n processed_text = preprocess(text)\n no_stpw_text = remove_stopwords(processed_text, lang)\n vectorized_tokens = to_vector_single_nonzeros(no_stpw_text,\n embeddings, len(no_stpw_text))\n if len(vectorized_tokens) > 0:\n vectorized_text = np.mean(vectorized_tokens, axis=0)\n else:\n vectorized_text = np.zeros((300,) * 1)\n print(vectorized_text)\n responses = jsonify(vector=vectorized_text.tolist())\n responses.status_code = 200\n return responses\n\n\n@app.route('/probability', methods=['POST'])\ndef make_probability():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in probability: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in probability: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Computing probability of having content related to ' + cls)\n probability = probability_terror(text, lang, cls)\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\n@app.route('/analyze', methods=['POST'])\ndef make_analyze():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in analyze: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(message=\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],\n topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/terms', methods=['POST'])\ndef make_terms():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n texts = data['dataset']\n print('Suggesting new terms for search...')\n terms = new_terms(texts)\n responses = jsonify(message='Suggested new terms for search: ',\n terms=list(terms))\n responses.status_code = 200\n return responses\n\n\n@app.route('/sento', methods=['POST'])\ndef make_sento():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 
'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in sento: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in sento: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Sento analysis')\n probability = probability_terror(text, lang, cls)\n print(probability)\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n data_audit = {'auditEventType': 'Start task', 'details': {'sento':\n 'NLP analysis'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(probability=probability, concepts=analysis[0],\n key_ideas=analysis[1], topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/classifier', methods=['POST'])\ndef make_classifier():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request('There is no data for the training')\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses = jsonify(\n 'Error in classifier: annotated data is missing')\n return responses\n try:\n user_id = data['user_id']\n except:\n responses = jsonify('Error in classifier: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify('Error in classifier: case_id is missing')\n return responses\n try:\n clas_name = data['clas_name']\n except:\n responses = jsonify(\n 'Error 
in classifier: classifier name is missing')\n return responses\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses = jsonify(\n 'Training data set should have more than 10 samples per each class'\n )\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print(\"Training a new classifier from the user's annotated dataset \")\n accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name\n )\n data_audit = {'auditEventType': 'Start task', 'details': {\n 'classifier':\n 'Trains a new classifier based on the annotations provided by the user'\n }, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(message=\n 'Classifier has been saved. Accuracy given in % - calculated using C-10V'\n , accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n\n@app.route('/claslisting', methods=['POST'])\ndef make_claslisting():\n user_id = None\n case_id = None\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n user_id = data['user_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: case_id is missing')\n return responses\n available_classifiers = claslisting(user_id, case_id)\n data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':\n 'Lists the available classifiers'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n\n\n@app.route('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg == '':\n msg = 'Error'\n 
return msg, code\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n",
"step-5": "# POST API for Red Alert project - NLP and Metalearning components\n# Insikt Intelligence S.L. 2019\n\nimport pandas as pd\nimport pickle\nfrom flask import Flask, render_template, request, jsonify\nfrom utilities import load_data, detect_language\nfrom preprocessing import preprocess, Tagger, remove_stopwords\nimport json\nfrom gensim.models import KeyedVectors\nfrom Embeddings import Embeddings, to_vector_single, to_vector_single_nonzeros\nimport numpy as np\nimport os\nfrom analysis import analyze\nfrom probability_terror import probability_terror\nfrom new_terms_no_lang import new_terms\nfrom classifier import classifier\nfrom claslisting import claslisting\nfrom audit import audit\n\napp = Flask(__name__)\n\nemb_dict = {\"en\": \"embedding-EN\", \"ar\": \"embedding-AR\", \"es\": \"embedding-ES\", \"ro\": \"embedding-RO\",\"fr\": \"embedding-FR\"}\n\n@app.route('/vectorize',methods=['POST'])\ndef make_vectorize():\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n #Get the text and the language\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses \n try:\n text = data['text']\n except:\n responses=jsonify(\"Error in vectorize: text is missing\")\n return responses \n \n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify(\"Language not available. 
Language must be in ['en','es','ar','ro','fr']\")\n return responses\n #Preprocess the text\n print(\"Vectorize...\")\n\n embeddings = Embeddings(emb_dict[lang])\n\n processed_text = preprocess(text)\n no_stpw_text = remove_stopwords(processed_text, lang)\n vectorized_tokens=to_vector_single_nonzeros(no_stpw_text, embeddings,len(no_stpw_text))\n\t\n if len(vectorized_tokens) > 0:\n vectorized_text = np.mean(vectorized_tokens, axis=0)\n else:\n vectorized_text =np.zeros((300,)*1)\n print(vectorized_text)\n \n #Send the response codes\n responses = jsonify(vector=vectorized_text.tolist())\n responses.status_code = 200\n return responses\n\n\n@app.route('/probability',methods=['POST'])\ndef make_probability():\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n #Get the text,language and classifier\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses \n try:\n text = data['text']\n except:\n responses=jsonify(\"Error in probability: text is missing\")\n return responses\n \n try:\n cls = data['classifier']\n except:\n responses=jsonify(\"Error in probability: classifier is missing\")\n return responses\n \n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify(\"Language not available. 
Language must be in ['en','es','ar','ro','fr']\")\n return responses\n \n \n #Preprocess the text\n print(\"Computing probability of having content related to \"+cls)\n\n probability = probability_terror(text,lang,cls)\n \n #Send the response codes\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\n@app.route('/analyze',methods=['POST'])\ndef make_analyze():\n\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n\n #Get the text and the language\n\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses \n try:\n text = data['text'] # we assume text is tokenized\n except:\n responses=jsonify(\"Error in analyze: text is missing\")\n return responses\n\n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify( message = \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\")\n return responses\n \n \n filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')\n registry = load_data(filename)\n\n analysis = analyze(text, lang, registry)\n #print(analysis[0])\n #Send the response codes\n responses = jsonify(concepts=analysis[0],key_ideas=analysis[1],topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\n@app.route('/terms',methods=['POST'])\ndef make_terms():\n\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n\n texts = data['dataset'] # we assume text is tokenized \n \n\t#Preprocess the text\n print(\"Suggesting new terms for search...\") \n terms=new_terms(texts)\n\t#print(terms)\n #Send the response codes\n responses = jsonify(message=\"Suggested new terms for search: \",terms= list(terms))\n responses.status_code = 200\n return responses\n\n\n@app.route('/sento',methods=['POST'])\ndef make_sento():\n\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n\n #Get the text, language and classifier\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses \n try:\n text = data['text']\n except:\n responses=jsonify(\"Error in sento: text is missing\")\n return responses \n try:\n cls = data['classifier']\n except:\n responses=jsonify(\"Error in sento: classifier is missing\")\n return responses\n\n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify(\"Language not available. 
Language must be in ['en','es','ar','ro','fr']\")\n return responses\n \n \n \n\t#Preprocess the text\n print(\"Sento analysis\") \n\n\n # Probability\n probability = probability_terror(text,lang,cls)\n print(probability)\n\n # Analyze\n filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')\n registry = load_data(filename)\n\n analysis = analyze(text, lang, registry)\n \n data_audit={\"auditEventType\":\"Start task\",\"details\":{\"sento\":\"NLP analysis\"},\"principal\":\"Analyst\"}\n datajson=json.dumps(data_audit)\n results_audit=audit(datajson)\n\n\n #Send the response codes\n responses = jsonify(probability=probability,concepts=analysis[0],key_ideas=analysis[1],topics=analysis[2])\n responses.status_code = 200\n return responses\n\n@app.route('/classifier',methods=['POST'])\ndef make_classifier():\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request(\"There is no data for the training\"))\n else:\n #Get the text and the language\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses=jsonify(\"Error in classifier: annotated data is missing\")\n return responses\n try:\n user_id=data['user_id']\n except:\n responses=jsonify(\"Error in classifier: user_id is missing\")\n return responses\n try: \n case_id=data['case_id']\n except:\n responses=jsonify(\"Error in classifier: case_id is missing\")\n return responses\n try: \n clas_name=data['clas_name']\n except:\n responses=jsonify(\"Error in classifier: classifier name is missing\")\n return responses\n\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses=jsonify( \"Training data set should have more than 10 samples per each class\")\n return responses\t\n\n if lang not in 
['en','es','ar','ro','fr']:\n responses=jsonify(\"Language not available. Language must be in ['en','es','ar','ro','fr']\")\n return responses\n \n \n #Train the new classifier\n print(\"Training a new classifier from the user's annotated dataset \")\n\n accuracy=classifier(annotated_data,lang,user_id,case_id,clas_name)\n \n data_audit={\"auditEventType\":\"Start task\",\"details\":{\"classifier\":\"Trains a new classifier based on the annotations provided by the user\"},\"principal\":\"Analyst\"}\n datajson=json.dumps(data_audit)\n results_audit=audit(datajson)\n\n #Send the response codes\n responses = jsonify(message=\"Classifier has been saved. Accuracy given in % - calculated using C-10V\", accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n@app.route('/claslisting',methods=['POST'])\ndef make_claslisting():\n user_id=None\n case_id=None\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n try:\n user_id=data['user_id']\n except:\n responses=jsonify(message=\"Error in classifiers listing: user_id is missing\")\n return responses\n try:\n case_id=data['case_id']\n except:\n responses=jsonify(message=\"Error in classifiers listing: case_id is missing\")\n return responses\n \n available_classifiers=claslisting(user_id,case_id)\n \n data_audit={\"auditEventType\":\"Start task\",\"details\":{\"claslisting\":\"Lists the available classifiers\"},\"principal\":\"Analyst\"}\n datajson=json.dumps(data_audit)\n results_audit=audit(datajson)\n\n #Send the response codes\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n \n\n@app.route('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg=='':\n msg = 'Error'\n return msg, code\n\nif __name__ == '__main__':\n\n #app.run()\n app.run(host='0.0.0.0',port=5000)\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
def calculadora(calcu):
    """Run one calculator operation selected from the menu.

    calcu: menu choice as a string -- '1' add, '2' subtract,
    '3' multiply, '4' divide, 'q' quit; anything else prints an error.
    Each arithmetic branch prompts for two integers, prints the result,
    then blocks until the user presses Enter.
    NOTE(review): non-numeric input raises ValueError and dividing by
    zero raises ZeroDivisionError -- neither is caught here; confirm
    whether the caller is expected to handle them.
    """
    if calcu == '1':
        os.system('clear')  # clear the terminal screen (POSIX 'clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f'{s1} + {s2} = {s1 + s2}')
        input('\nPresione una tecla para continuar.')  # pause until Enter
    elif calcu == '2':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f'{s1} - {s2} = {s1 - s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == '3':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f' {s1} x {s2} = {s1 * s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == '4':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        # True division; raises ZeroDivisionError when s2 == 0.
        print(f'{s1} / {s2} = {s1 / s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == 'q':
        print('Gracias, Vuelva Prontoss')
        exit()  # terminates the whole program
    else:
        os.system('clear')
        print('Lo siento no es un numero valido!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def menuCalc():
    """Clear the screen and print the calculator's option menu."""
    os.system('clear')
    menu_lines = (
        'Esto parece un menu:',
        '\t1 - Suma',
        '\t2 - Resta',
        '\t3 - Multiplicacion',
        '\t4 - Division',
        '\tq - Para salir',
    )
    for line in menu_lines:
        print(line)
def calculadora(calcu):
    """Execute one menu choice.

    calcu: '1' add, '2' subtract, '3' multiply, '4' divide,
    'q' quit the program; any other value prints an error message.
    Arithmetic branches prompt for two integers, show the result,
    and wait for Enter before returning.
    """
    # Result formatters per menu key; format strings (including the odd
    # leading space of the multiplication line) match the original output.
    formatters = {
        '1': lambda a, b: f'{a} + {b} = {a + b}',
        '2': lambda a, b: f'{a} - {b} = {a - b}',
        '3': lambda a, b: f' {a} x {b} = {a * b}',
        '4': lambda a, b: f'{a} / {b} = {a / b}',
    }
    if calcu in formatters:
        os.system('clear')
        first = int(input('Ingrese un numero\n'))
        second = int(input('Ingrese otro\n'))
        os.system('clear')
        print(formatters[calcu](first, second))
        input('\nPresione una tecla para continuar.')
    elif calcu == 'q':
        print('Gracias, Vuelva Prontoss')
        exit()  # terminates the whole program
    else:
        os.system('clear')
        print('Lo siento no es un numero valido!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def menuCalc():
    """Clear the screen and print the calculator's option menu."""
    os.system('clear')  # clear the terminal screen (POSIX 'clear')
    print('Esto parece un menu:')
    print('\t1 - Suma')
    print('\t2 - Resta')
    print('\t3 - Multiplicacion')
    print('\t4 - Division')
    print('\tq - Para salir')
def calculadora(calcu):
    """Run one calculator operation selected from the menu.

    calcu: '1' add, '2' subtract, '3' multiply, '4' divide,
    'q' quit; anything else prints an error. Arithmetic branches
    prompt for two integers, print the result, then wait for Enter.
    NOTE(review): ValueError (bad input) and ZeroDivisionError
    (s2 == 0 in the '/' branch) are not caught here.
    """
    if calcu == '1':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f'{s1} + {s2} = {s1 + s2}')
        input('\nPresione una tecla para continuar.')  # pause until Enter
    elif calcu == '2':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f'{s1} - {s2} = {s1 - s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == '3':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f' {s1} x {s2} = {s1 * s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == '4':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f'{s1} / {s2} = {s1 / s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == 'q':
        print('Gracias, Vuelva Prontoss')
        exit()  # terminates the whole program
    else:
        os.system('clear')
        print('Lo siento no es un numero valido!')
# Main loop: show the menu, read a choice, dispatch it; runs until the
# user picks 'q' (calculadora() calls exit() on 'q').
while True:
    menuCalc()
    calc = input('Ingrese su opcion: ')
    calculadora(calc)
<|reserved_special_token_1|>
import os, sys
def menuCalc():
    """Clear the screen and print the calculator's option menu."""
    os.system('clear')  # clear the terminal screen (POSIX 'clear')
    print('Esto parece un menu:')
    print('\t1 - Suma')
    print('\t2 - Resta')
    print('\t3 - Multiplicacion')
    print('\t4 - Division')
    print('\tq - Para salir')
def calculadora(calcu):
    """Run one calculator operation selected from the menu.

    calcu: '1' add, '2' subtract, '3' multiply, '4' divide,
    'q' quit; anything else prints an error. Arithmetic branches
    prompt for two integers, print the result, then wait for Enter.
    NOTE(review): ValueError (bad input) and ZeroDivisionError
    (s2 == 0 in the '/' branch) are not caught here.
    """
    if calcu == '1':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f'{s1} + {s2} = {s1 + s2}')
        input('\nPresione una tecla para continuar.')  # pause until Enter
    elif calcu == '2':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f'{s1} - {s2} = {s1 - s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == '3':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f' {s1} x {s2} = {s1 * s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == '4':
        os.system('clear')
        s1 = int(input('Ingrese un numero\n'))
        s2 = int(input('Ingrese otro\n'))
        os.system('clear')
        print(f'{s1} / {s2} = {s1 / s2}')
        input('\nPresione una tecla para continuar.')
    elif calcu == 'q':
        print('Gracias, Vuelva Prontoss')
        exit()  # terminates the whole program
    else:
        os.system('clear')
        print('Lo siento no es un numero valido!')
# Main loop: show the menu, read a choice, dispatch it; runs until the
# user picks 'q' (calculadora() calls exit() on 'q').
while True:
    menuCalc()
    calc = input('Ingrese su opcion: ')
    calculadora(calc)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Calcu.py
#
import os, sys
def menuCalc():
    """Clear the screen and print the calculator's option menu."""
    os.system('clear')
    print("Esto parece un menu:")
    print("\t1 - Suma")
    print("\t2 - Resta")
    print("\t3 - Multiplicacion")
    print("\t4 - Division")
    print("\tq - Para salir")

def _read_operands():
    """Prompt for the two integer operands, clearing the screen before and after."""
    os.system('clear')
    s1 = int(input("Ingrese un numero\n"))
    s2 = int(input("Ingrese otro\n"))
    os.system('clear')
    return s1, s2

def calculadora(calcu):
    """Run one calculator operation selected from the menu.

    calcu: '1' add, '2' subtract, '3' multiply, '4' divide,
    'q' quit the program; anything else prints an error message.
    Arithmetic branches prompt for two integers, print the result,
    then wait for Enter.

    Fixes vs. original: dividing by zero no longer raises an uncaught
    ZeroDivisionError (a message is printed instead), and the mixed
    tab/space indentation was normalized.
    """
    if calcu == "1":
        s1, s2 = _read_operands()
        print(f"{s1} + {s2} = {s1+s2}")
        input("\nPresione una tecla para continuar.")
    elif calcu == "2":
        s1, s2 = _read_operands()
        print(f"{s1} - {s2} = {s1-s2}")
        input("\nPresione una tecla para continuar.")
    elif calcu == "3":
        s1, s2 = _read_operands()
        print(f" {s1} x {s2} = {s1*s2}")
        input("\nPresione una tecla para continuar.")
    elif calcu == "4":
        s1, s2 = _read_operands()
        if s2 == 0:
            # Guard: the original crashed with ZeroDivisionError here.
            print("No se puede dividir por cero!")
        else:
            print(f"{s1} / {s2} = {s1 / s2}")
        input("\nPresione una tecla para continuar.")
    elif calcu == "q":
        print("Gracias, Vuelva Prontoss")
        exit()  # terminates the whole program
    else:
        os.system('clear')
        print("Lo siento no es un numero valido!")
# Main loop: show the menu, read a choice, dispatch it; runs until the
# user picks 'q' (calculadora() calls exit() on 'q').
while True:
    menuCalc()
    calc = input("Ingrese su opcion: ")
    calculadora(calc)
|
flexible
|
{
"blob_id": "ac033e45ea61770c302be677f4dfc95945e2cca5",
"index": 6100,
"step-1": "<mask token>\n\n\ndef calculadora(calcu):\n if calcu == '1':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} + {s2} = {s1 + s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '2':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} - {s2} = {s1 - s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '3':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f' {s1} x {s2} = {s1 * s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '4':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} / {s2} = {s1 / s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == 'q':\n print('Gracias, Vuelva Prontoss')\n exit()\n else:\n os.system('clear')\n print('Lo siento no es un numero valido!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef menuCalc():\n os.system('clear')\n print('Esto parece un menu:')\n print('\\t1 - Suma')\n print('\\t2 - Resta')\n print('\\t3 - Multiplicacion')\n print('\\t4 - Division')\n print('\\tq - Para salir')\n\n\ndef calculadora(calcu):\n if calcu == '1':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} + {s2} = {s1 + s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '2':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} - {s2} = {s1 - s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '3':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f' {s1} x {s2} = {s1 * s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '4':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} / {s2} = {s1 / s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == 'q':\n print('Gracias, Vuelva Prontoss')\n exit()\n else:\n os.system('clear')\n print('Lo siento no es un numero valido!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef menuCalc():\n os.system('clear')\n print('Esto parece un menu:')\n print('\\t1 - Suma')\n print('\\t2 - Resta')\n print('\\t3 - Multiplicacion')\n print('\\t4 - Division')\n print('\\tq - Para salir')\n\n\ndef calculadora(calcu):\n if calcu == '1':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} + {s2} = {s1 + s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '2':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} - {s2} = {s1 - s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '3':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f' {s1} x {s2} = {s1 * s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '4':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} / {s2} = {s1 / s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == 'q':\n print('Gracias, Vuelva Prontoss')\n exit()\n else:\n os.system('clear')\n print('Lo siento no es un numero valido!')\n\n\nwhile True:\n menuCalc()\n calc = input('Ingrese su opcion: ')\n calculadora(calc)\n",
"step-4": "import os, sys\n\n\ndef menuCalc():\n os.system('clear')\n print('Esto parece un menu:')\n print('\\t1 - Suma')\n print('\\t2 - Resta')\n print('\\t3 - Multiplicacion')\n print('\\t4 - Division')\n print('\\tq - Para salir')\n\n\ndef calculadora(calcu):\n if calcu == '1':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} + {s2} = {s1 + s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '2':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} - {s2} = {s1 - s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '3':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f' {s1} x {s2} = {s1 * s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == '4':\n os.system('clear')\n s1 = int(input('Ingrese un numero\\n'))\n s2 = int(input('Ingrese otro\\n'))\n os.system('clear')\n print(f'{s1} / {s2} = {s1 / s2}')\n input('\\nPresione una tecla para continuar.')\n elif calcu == 'q':\n print('Gracias, Vuelva Prontoss')\n exit()\n else:\n os.system('clear')\n print('Lo siento no es un numero valido!')\n\n\nwhile True:\n menuCalc()\n calc = input('Ingrese su opcion: ')\n calculadora(calc)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Calcu.py\n# \n\nimport os, sys\n\ndef menuCalc():\n\n os.system('clear')\n print(\"Esto parece un menu:\")\n print(\"\\t1 - Suma\")\n print(\"\\t2 - Resta\")\n print(\"\\t3 - Multiplicacion\")\n print(\"\\t4 - Division\")\n print(\"\\tq - Para salir\")\n \ndef calculadora(calcu,):\n\tif calcu == \"1\":\n\t\tos.system('clear')\n\t\ts1=int(input(\"Ingrese un numero\\n\"))\n\t\ts2=int(input(\"Ingrese otro\\n\"))\n\t\tos.system('clear')\n\t\tprint(f\"{s1} + {s2} = {s1+s2}\")\n\t\tinput(\"\\nPresione una tecla para continuar.\")\n\telif calcu == \"2\":\n\t\tos.system('clear')\n\t\ts1=int(input(\"Ingrese un numero\\n\"))\n\t\ts2=int(input(\"Ingrese otro\\n\"))\n\t\tos.system('clear')\n\t\tprint(f\"{s1} - {s2} = {s1-s2}\")\n\t\tinput(\"\\nPresione una tecla para continuar.\")\n\telif calcu == \"3\":\n\t\tos.system('clear')\n\t\ts1=int(input(\"Ingrese un numero\\n\"))\n\t\ts2=int(input(\"Ingrese otro\\n\"))\n\t\tos.system('clear')\n\t\tprint(f\" {s1} x {s2} = {s1*s2}\")\n\t\tinput(\"\\nPresione una tecla para continuar.\")\n\telif calcu == \"4\":\n\t\tos.system('clear')\n\t\ts1=int(input(\"Ingrese un numero\\n\"))\n\t\ts2=int(input(\"Ingrese otro\\n\"))\n\t\tos.system('clear')\n\t\tprint(f\"{s1} / {s2} = {s1 / s2}\")\n\t\tinput(\"\\nPresione una tecla para continuar.\")\n\telif calcu == \"q\":\n\t\tprint(\"Gracias, Vuelva Prontoss\")\n\t\texit()\n\telse:\n\t\tos.system('clear')\n\t\tprint(\"Lo siento no es un numero valido!\")\n\nwhile True:\n \n menuCalc()\n calc = input(\"Ingrese su opcion: \")\n calculadora(calc)\n\t\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def rearrange_digits(input_list):
    """Split the digits of *input_list* into two numbers with maximal sum.

    Sorts the list in place, descending, via merge_sort (defined elsewhere
    in this file), then deals digits alternately: even indices build the
    first number, odd indices the second. Returns [first, second], or []
    for an empty input.
    NOTE(review): a single-element input makes the second number the empty
    string, so int('') raises ValueError -- confirm callers never pass one.
    """
    if len(input_list) == 0:
        return []
    merge_sort(input_list)  # in-place descending sort
    first_number = ''
    second_number = ''
    for i in range(0, len(input_list)):
        if i % 2 == 0:
            first_number += str(input_list[i])
        else:
            second_number += str(input_list[i])
    ans = [int(first_number), int(second_number)]
    return ans
def test_function(test_case):
    """Print 'Pass' when the output's sum equals the expected pair's sum.

    test_case: [input_digits, expected_pair]. Only the SUMS are compared,
    not the exact numbers, so different digit splits with equal sums pass.
    """
    output = rearrange_digits(test_case[0])
    solution = test_case[1]
    if sum(output) == sum(solution):
        print('Pass')
    else:
        print('Fail')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def merge_sort(arr):
    """Sort *arr* in place into descending order (merge done by merge()).

    Recursively splits the list in half into auxiliary copies, sorts each
    half, then merges them back into *arr*. Lists of length <= 1 are
    already sorted and left untouched.
    """
    if len(arr) > 1:
        mid = len(arr) // 2
        left = arr[:mid]    # auxiliary copy of the first half
        right = arr[mid:]   # auxiliary copy of the second half
        merge_sort(left)
        merge_sort(right)
        merge(arr, left, right)  # merge() is defined elsewhere in this file
<|reserved_special_token_0|>
def rearrange_digits(input_list):
    """Split the digits of *input_list* into two numbers with maximal sum.

    Sorts the list in place, descending, via merge_sort, then deals digits
    alternately: even indices build the first number, odd indices the
    second. Returns [first, second], or [] for an empty input.
    NOTE(review): a single-element input makes the second number the empty
    string, so int('') raises ValueError -- confirm callers never pass one.
    """
    if len(input_list) == 0:
        return []
    merge_sort(input_list)  # in-place descending sort
    first_number = ''
    second_number = ''
    for i in range(0, len(input_list)):
        if i % 2 == 0:
            first_number += str(input_list[i])
        else:
            second_number += str(input_list[i])
    ans = [int(first_number), int(second_number)]
    return ans
def test_function(test_case):
    """Print 'Pass' when the output's sum equals the expected pair's sum.

    test_case: [input_digits, expected_pair]. Only the SUMS are compared,
    not the exact numbers, so different digit splits with equal sums pass.
    """
    output = rearrange_digits(test_case[0])
    solution = test_case[1]
    if sum(output) == sum(solution):
        print('Pass')
    else:
        print('Fail')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def merge_sort(arr):
if len(arr) > 1:
mid = len(arr) // 2
left = arr[:mid]
right = arr[mid:]
merge_sort(left)
merge_sort(right)
merge(arr, left, right)
def merge(arr, left, right):
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] > right[j]:
arr[k] = left[i]
i += 1
else:
arr[k] = right[j]
j += 1
k += 1
while i < len(left):
arr[k] = left[i]
i += 1
k += 1
while j < len(right):
arr[k] = right[j]
j += 1
k += 1
def rearrange_digits(input_list):
if len(input_list) == 0:
return []
merge_sort(input_list)
first_number = ''
second_number = ''
for i in range(0, len(input_list)):
if i % 2 == 0:
first_number += str(input_list[i])
else:
second_number += str(input_list[i])
ans = [int(first_number), int(second_number)]
return ans
def test_function(test_case):
output = rearrange_digits(test_case[0])
solution = test_case[1]
if sum(output) == sum(solution):
print('Pass')
else:
print('Fail')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def merge_sort(arr):
    """Sort *arr* in place into descending order (merge done by merge())."""
    if len(arr) > 1:
        mid = len(arr) // 2
        left = arr[:mid]    # auxiliary copy of the first half
        right = arr[mid:]   # auxiliary copy of the second half
        merge_sort(left)
        merge_sort(right)
        merge(arr, left, right)
def merge(arr, left, right):
    """Merge two descending-sorted lists back into *arr*, descending."""
    i = 0
    j = 0
    k = 0
    # Take the larger head element until one side is exhausted.
    while i < len(left) and j < len(right):
        if left[i] > right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1
    # Drain whichever side still has items.
    while i < len(left):
        arr[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        arr[k] = right[j]
        j += 1
        k += 1
def rearrange_digits(input_list):
    """Split the digits into two numbers with maximal sum.

    Sorts in place (descending) then deals digits alternately; even
    indices form the first number, odd indices the second. Returns
    [first, second], or [] for an empty input.
    NOTE(review): a single-element input makes int('') raise ValueError.
    """
    if len(input_list) == 0:
        return []
    merge_sort(input_list)
    first_number = ''
    second_number = ''
    for i in range(0, len(input_list)):
        if i % 2 == 0:
            first_number += str(input_list[i])
        else:
            second_number += str(input_list[i])
    ans = [int(first_number), int(second_number)]
    return ans
def test_function(test_case):
    """Print 'Pass' when the output's sum equals the expected pair's sum.

    Only the SUMS are compared, so different digit splits with equal
    sums still pass (e.g. [531, 42] vs the listed [542, 31]).
    """
    output = rearrange_digits(test_case[0])
    solution = test_case[1]
    if sum(output) == sum(solution):
        print('Pass')
    else:
        print('Fail')
# Smoke tests, run at import time; each should print 'Pass'.
test_function([[1, 2, 3, 4, 5], [542, 31]])
test_function([[4, 6, 2, 5, 9, 8], [964, 852]])
test_function([[1, 2, 3], [32, 1]])
test_function([[], []])
test_function([[9, 9, 9, 9, 9, 9], [999, 999]])
<|reserved_special_token_1|>
# Merge sort (descending) is used so digits can be dealt largest-first.
def merge_sort(arr):
    """Sort *arr* in place into descending order."""
    if len(arr) > 1:
        # Recursively split the array in half.
        mid = len(arr) // 2
        # Auxiliary copies of each half.
        left = arr[:mid]
        right = arr[mid:]
        # Sort the left half.
        merge_sort(left)
        # Sort the right half.
        merge_sort(right)
        # Merge the two sorted halves back into arr.
        merge(arr, left, right)


def merge(arr, left, right):
    """Merge two descending-sorted lists back into *arr*, descending."""
    i = 0
    j = 0
    k = 0
    # Keep arr in descending order:
    while i < len(left) and j < len(right):
        # Write the larger of the two head elements at position k.
        if left[i] > right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1

    # Exactly one of the two halves still has elements; drain
    # whichever one it is.
    while i < len(left):
        arr[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        arr[k] = right[j]
        j += 1
        k += 1


def rearrange_digits(input_list):
    """Split the digits into two numbers with maximal sum; [] if empty."""

    if len(input_list) == 0:
        return []

    # Sort the list in place, descending, with merge sort.
    merge_sort(input_list)

    first_number = ''
    second_number = ''

    # Deal digits alternately: even indices -> first, odd -> second.
    for i in range(0, len(input_list)):
        if i % 2 == 0:
            first_number += str(input_list[i])
        else:
            second_number += str(input_list[i])
    # Convert the digit strings to ints and return the pair.
    ans = [int(first_number), int(second_number)]
    return ans


def test_function(test_case):
    """Print 'Pass' when output and expected pair have equal sums."""
    output = rearrange_digits(test_case[0])
    solution = test_case[1]
    if sum(output) == sum(solution):
        print("Pass")
    else:
        print("Fail")


# Test case 1:
test_function([[1, 2, 3, 4, 5], [542, 31]])

# Test case 2:
test_function([[4, 6, 2, 5, 9, 8], [964, 852]])

# Test case 3:
test_function([[1, 2, 3], [32, 1]])

# Test case 4 (empty input returns []):
test_function([[], []])

# Test case 5 (all-equal digits):
test_function([[9, 9, 9, 9, 9, 9], [999, 999]])
|
flexible
|
{
"blob_id": "264b48c2b9ce4ec948ca5ba548e708848760f3dc",
"index": 8271,
"step-1": "<mask token>\n\n\ndef rearrange_digits(input_list):\n if len(input_list) == 0:\n return []\n merge_sort(input_list)\n first_number = ''\n second_number = ''\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print('Pass')\n else:\n print('Fail')\n\n\n<mask token>\n",
"step-2": "def merge_sort(arr):\n if len(arr) > 1:\n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(arr, left, right)\n\n\n<mask token>\n\n\ndef rearrange_digits(input_list):\n if len(input_list) == 0:\n return []\n merge_sort(input_list)\n first_number = ''\n second_number = ''\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print('Pass')\n else:\n print('Fail')\n\n\n<mask token>\n",
"step-3": "def merge_sort(arr):\n if len(arr) > 1:\n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(arr, left, right)\n\n\ndef merge(arr, left, right):\n i = 0\n j = 0\n k = 0\n while i < len(left) and j < len(right):\n if left[i] > right[j]:\n arr[k] = left[i]\n i += 1\n else:\n arr[k] = right[j]\n j += 1\n k += 1\n while i < len(left):\n arr[k] = left[i]\n i += 1\n k += 1\n while j < len(right):\n arr[k] = right[j]\n j += 1\n k += 1\n\n\ndef rearrange_digits(input_list):\n if len(input_list) == 0:\n return []\n merge_sort(input_list)\n first_number = ''\n second_number = ''\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print('Pass')\n else:\n print('Fail')\n\n\n<mask token>\n",
"step-4": "def merge_sort(arr):\n if len(arr) > 1:\n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(arr, left, right)\n\n\ndef merge(arr, left, right):\n i = 0\n j = 0\n k = 0\n while i < len(left) and j < len(right):\n if left[i] > right[j]:\n arr[k] = left[i]\n i += 1\n else:\n arr[k] = right[j]\n j += 1\n k += 1\n while i < len(left):\n arr[k] = left[i]\n i += 1\n k += 1\n while j < len(right):\n arr[k] = right[j]\n j += 1\n k += 1\n\n\ndef rearrange_digits(input_list):\n if len(input_list) == 0:\n return []\n merge_sort(input_list)\n first_number = ''\n second_number = ''\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print('Pass')\n else:\n print('Fail')\n\n\ntest_function([[1, 2, 3, 4, 5], [542, 31]])\ntest_function([[4, 6, 2, 5, 9, 8], [964, 852]])\ntest_function([[1, 2, 3], [32, 1]])\ntest_function([[], []])\ntest_function([[9, 9, 9, 9, 9, 9], [999, 999]])\n",
"step-5": "# Merge sort is used to sort the elements\ndef merge_sort(arr):\n if len(arr) > 1:\n # Recursion is used to continuously split the array in half.\n mid = len(arr) // 2\n # Using Auxiliary storage here\n left = arr[:mid]\n right = arr[mid:]\n # Traverse the left side of the array\n merge_sort(left)\n # Traverse the right side of the array\n merge_sort(right)\n # Then we merge the left and right side\n merge(arr, left, right)\n\n\ndef merge(arr, left, right):\n i = 0\n j = 0\n k = 0\n # I want the array to be in descending order\n while i < len(left) and j < len(right):\n # We let the array at k be the largest values\n if left[i] > right[j]:\n arr[k] = left[i]\n i += 1\n else:\n arr[k] = right[j]\n j += 1\n k += 1\n\n # One of the two arrays will be left with elements so we dump\n # which ever one still has items in it.\n while i < len(left):\n arr[k] = left[i]\n i += 1\n k += 1\n while j < len(right):\n arr[k] = right[j]\n j += 1\n k += 1\n\n\ndef rearrange_digits(input_list):\n\n if len(input_list) == 0:\n return []\n\n # We sort the list with merge sort\n merge_sort(input_list)\n\n first_number = ''\n second_number = ''\n\n for i in range(0, len(input_list)):\n if i % 2 == 0:\n first_number += str(input_list[i])\n else:\n second_number += str(input_list[i])\n # Convert them to ints and return the two numbers\n ans = [int(first_number), int(second_number)]\n return ans\n\n\ndef test_function(test_case):\n output = rearrange_digits(test_case[0])\n solution = test_case[1]\n if sum(output) == sum(solution):\n print(\"Pass\")\n else:\n print(\"Fail\")\n\n\n# Test case 1:\ntest_function([[1, 2, 3, 4, 5], [542, 31]])\n\n# Test case 2:\ntest_function([[4, 6, 2, 5, 9, 8], [964, 852]])\n\n# Test case 3:\ntest_function([[1, 2, 3], [32, 1]])\n\n# Test case 4:\ntest_function([[], []])\n\n# Test case 5:\ntest_function([[9, 9, 9, 9, 9, 9], [999, 999]])",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Solution(object):
<|reserved_special_token_0|>
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
def mergeTrees(self, t1, t2):
"""
:type t1: TreeNode
:type t2: TreeNode
:rtype: TreeNode
"""
if t1 and t2:
root = TreeNode(t1.val + t2.val)
root.left = self.mergeTrees(t1.left, t2.left)
root.right = self.mergeTrees(t1.right, t2.right)
return root
else:
return t1 or t2
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def mergeTrees(self, t1, t2):
"""
:type t1: TreeNode
:type t2: TreeNode
:rtype: TreeNode
"""
if t1 and t2:
root = TreeNode(t1.val + t2.val)
root.left = self.mergeTrees(t1.left, t2.left)
root.right = self.mergeTrees(t1.right, t2.right)
return root
else:
return t1 or t2
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__source__ = 'https://leetcode.com/problems/merge-two-binary-trees/'
<|reserved_special_token_0|>
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def mergeTrees(self, t1, t2):
"""
:type t1: TreeNode
:type t2: TreeNode
:rtype: TreeNode
"""
if t1 and t2:
root = TreeNode(t1.val + t2.val)
root.left = self.mergeTrees(t1.left, t2.left)
root.right = self.mergeTrees(t1.right, t2.right)
return root
else:
return t1 or t2
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = """
# Thought: https://leetcode.com/problems/merge-two-binary-trees/solution/
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
# DFS
# 10ms 40.59%
class Solution {
public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {
if (t1 != null && t2 != null) {
TreeNode root = new TreeNode(t1.val + t2.val);
root.left = mergeTrees(t1.left, t2.left);
root.right = mergeTrees(t1.right, t2.right);
return root;
} else if (t1 == null) {
return t2;
} else {
return t1;
}
}
}
# DFS
# 6ms 98.05%
class Solution {
public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {
if (t1 == null)
return t2;
if (t2 == null)
return t1;
t1.val += t2.val;
t1.left = mergeTrees(t1.left, t2.left);
t1.right = mergeTrees(t1.right, t2.right);
return t1;
}
}
# BFS
# 8ms 69.45%
class Solution {
public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {
if (t1 == null)
return t2;
Stack < TreeNode[] > stack = new Stack < > ();
stack.push(new TreeNode[] {t1, t2});
while (!stack.isEmpty()) {
TreeNode[] t = stack.pop();
if (t[0] == null || t[1] == null) {
continue;
}
t[0].val += t[1].val;
if (t[0].left == null) {
t[0].left = t[1].left;
} else {
stack.push(new TreeNode[] {t[0].left, t[1].left});
}
if (t[0].right == null) {
t[0].right = t[1].right;
} else {
stack.push(new TreeNode[] {t[0].right, t[1].right});
}
}
return t1;
}
}
"""
<|reserved_special_token_1|>
__source__ = 'https://leetcode.com/problems/merge-two-binary-trees/'
# Time: O(n)
# Space: O(n)
#
# Description: Leetcode # 617. Merge Two Binary Trees
#
# Given two binary trees and imagine that when you put one of them to cover the other,
# some nodes of the two trees are overlapped while the others are not.
#
# You need to merge them into a new binary tree. The merge rule is that if two nodes overlap,
# then sum node values up as the new value of the merged node. Otherwise,
# the NOT null node will be used as the node of new tree.
#
# Example 1:
# Input:
# Tree 1 Tree 2
# 1 2
# / \ / \
# 3 2 1 3
# / \ \
# 5 4 7
# Output:
# Merged tree:
# 3
# / \
# 4 5
# / \ \
# 5 4 7
# Note: The merging process must start from the root nodes of both trees.
#
# Hide Company Tags Amazon
# Hide Tags Tree
#
import unittest
# Definition for a binary tree node.
class TreeNode(object):
    """Plain binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        # x is the node payload; children start detached and are wired
        # up by callers (e.g. Solution.mergeTrees).
        self.val = x
        self.left = None
        self.right = None
# 68ms 68.16%
class Solution(object):
    def mergeTrees(self, t1, t2):
        """
        Overlay two binary trees: overlapping nodes sum their values,
        non-overlapping subtrees are reused as-is.

        :type t1: TreeNode
        :type t2: TreeNode
        :rtype: TreeNode
        """
        # Guard clause: if either side is missing, hand back whichever
        # subtree survives (or None when both are absent).
        if not t1 or not t2:
            return t1 or t2
        merged = TreeNode(t1.val + t2.val)
        merged.left = self.mergeTrees(t1.left, t2.left)
        merged.right = self.mergeTrees(t1.right, t2.right)
        return merged
class TestMethods(unittest.TestCase):
    """Placeholder test case; only proves the unittest harness runs."""
    def test_Local(self):
        # Trivial sanity assertion -- no real coverage of Solution yet.
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/merge-two-binary-trees/solution/
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
# DFS
# 10ms 40.59%
class Solution {
public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {
if (t1 != null && t2 != null) {
TreeNode root = new TreeNode(t1.val + t2.val);
root.left = mergeTrees(t1.left, t2.left);
root.right = mergeTrees(t1.right, t2.right);
return root;
} else if (t1 == null) {
return t2;
} else {
return t1;
}
}
}
# DFS
# 6ms 98.05%
class Solution {
public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {
if (t1 == null)
return t2;
if (t2 == null)
return t1;
t1.val += t2.val;
t1.left = mergeTrees(t1.left, t2.left);
t1.right = mergeTrees(t1.right, t2.right);
return t1;
}
}
# BFS
# 8ms 69.45%
class Solution {
public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {
if (t1 == null)
return t2;
Stack < TreeNode[] > stack = new Stack < > ();
stack.push(new TreeNode[] {t1, t2});
while (!stack.isEmpty()) {
TreeNode[] t = stack.pop();
if (t[0] == null || t[1] == null) {
continue;
}
t[0].val += t[1].val;
if (t[0].left == null) {
t[0].left = t[1].left;
} else {
stack.push(new TreeNode[] {t[0].left, t[1].left});
}
if (t[0].right == null) {
t[0].right = t[1].right;
} else {
stack.push(new TreeNode[] {t[0].right, t[1].right});
}
}
return t1;
}
}
'''
|
flexible
|
{
"blob_id": "42371760d691eac9c3dfe5693b03cbecc13fd94d",
"index": 6066,
"step-1": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n\n\nclass TestMethods(unittest.TestCase):\n\n def test_Local(self):\n self.assertEqual(1, 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n\n def mergeTrees(self, t1, t2):\n \"\"\"\n :type t1: TreeNode\n :type t2: TreeNode\n :rtype: TreeNode\n \"\"\"\n if t1 and t2:\n root = TreeNode(t1.val + t2.val)\n root.left = self.mergeTrees(t1.left, t2.left)\n root.right = self.mergeTrees(t1.right, t2.right)\n return root\n else:\n return t1 or t2\n\n\nclass TestMethods(unittest.TestCase):\n\n def test_Local(self):\n self.assertEqual(1, 1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TreeNode(object):\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n\n def mergeTrees(self, t1, t2):\n \"\"\"\n :type t1: TreeNode\n :type t2: TreeNode\n :rtype: TreeNode\n \"\"\"\n if t1 and t2:\n root = TreeNode(t1.val + t2.val)\n root.left = self.mergeTrees(t1.left, t2.left)\n root.right = self.mergeTrees(t1.right, t2.right)\n return root\n else:\n return t1 or t2\n\n\nclass TestMethods(unittest.TestCase):\n\n def test_Local(self):\n self.assertEqual(1, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n<mask token>\n",
"step-4": "__source__ = 'https://leetcode.com/problems/merge-two-binary-trees/'\n<mask token>\n\n\nclass TreeNode(object):\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n\n def mergeTrees(self, t1, t2):\n \"\"\"\n :type t1: TreeNode\n :type t2: TreeNode\n :rtype: TreeNode\n \"\"\"\n if t1 and t2:\n root = TreeNode(t1.val + t2.val)\n root.left = self.mergeTrees(t1.left, t2.left)\n root.right = self.mergeTrees(t1.right, t2.right)\n return root\n else:\n return t1 or t2\n\n\nclass TestMethods(unittest.TestCase):\n\n def test_Local(self):\n self.assertEqual(1, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\nJava = \"\"\"\n# Thought: https://leetcode.com/problems/merge-two-binary-trees/solution/\n/**\n * Definition for a binary tree node.\n * public class TreeNode {\n * int val;\n * TreeNode left;\n * TreeNode right;\n * TreeNode(int x) { val = x; }\n * }\n */\n# DFS\n# 10ms 40.59%\nclass Solution {\n public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {\n if (t1 != null && t2 != null) {\n TreeNode root = new TreeNode(t1.val + t2.val);\n root.left = mergeTrees(t1.left, t2.left);\n root.right = mergeTrees(t1.right, t2.right);\n return root;\n } else if (t1 == null) {\n return t2;\n } else {\n return t1;\n }\n }\n}\n\n# DFS\n# 6ms 98.05%\nclass Solution {\n public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {\n if (t1 == null)\n return t2;\n if (t2 == null)\n return t1;\n t1.val += t2.val;\n t1.left = mergeTrees(t1.left, t2.left);\n t1.right = mergeTrees(t1.right, t2.right);\n return t1;\n }\n}\n\n\n# BFS\n# 8ms 69.45%\nclass Solution {\n public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {\n if (t1 == null)\n return t2;\n Stack < TreeNode[] > stack = new Stack < > ();\n stack.push(new TreeNode[] {t1, t2});\n while (!stack.isEmpty()) {\n TreeNode[] t = stack.pop();\n if (t[0] == null || t[1] == null) {\n continue;\n }\n t[0].val += t[1].val;\n if (t[0].left == null) {\n t[0].left = t[1].left;\n } 
else {\n stack.push(new TreeNode[] {t[0].left, t[1].left});\n }\n if (t[0].right == null) {\n t[0].right = t[1].right;\n } else {\n stack.push(new TreeNode[] {t[0].right, t[1].right});\n }\n }\n return t1;\n }\n}\n\"\"\"\n",
"step-5": "__source__ = 'https://leetcode.com/problems/merge-two-binary-trees/'\n# Time: O(n)\n# Space: O(n)\n#\n# Description: Leetcode # 617. Merge Two Binary Trees\n#\n# Given two binary trees and imagine that when you put one of them to cover the other,\n# some nodes of the two trees are overlapped while the others are not.\n#\n# You need to merge them into a new binary tree. The merge rule is that if two nodes overlap,\n# then sum node values up as the new value of the merged node. Otherwise,\n# the NOT null node will be used as the node of new tree.\n#\n# Example 1:\n# Input:\n# \tTree 1 Tree 2\n# 1 2\n# / \\ / \\\n# 3 2 1 3\n# / \\ \\\n# 5 4 7\n# Output:\n# Merged tree:\n# \t 3\n# \t / \\\n# \t 4 5\n# \t / \\ \\\n# \t 5 4 7\n# Note: The merging process must start from the root nodes of both trees.\n#\n# Hide Company Tags Amazon\n# Hide Tags Tree\n#\nimport unittest\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# 68ms 68.16%\nclass Solution(object):\n def mergeTrees(self, t1, t2):\n \"\"\"\n :type t1: TreeNode\n :type t2: TreeNode\n :rtype: TreeNode\n \"\"\"\n if t1 and t2:\n root = TreeNode(t1.val + t2.val)\n root.left = self.mergeTrees(t1.left, t2.left)\n root.right = self.mergeTrees(t1.right, t2.right)\n return root\n else:\n return t1 or t2\n\nclass TestMethods(unittest.TestCase):\n def test_Local(self):\n self.assertEqual(1, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n\nJava = '''\n# Thought: https://leetcode.com/problems/merge-two-binary-trees/solution/\n/**\n * Definition for a binary tree node.\n * public class TreeNode {\n * int val;\n * TreeNode left;\n * TreeNode right;\n * TreeNode(int x) { val = x; }\n * }\n */\n# DFS\n# 10ms 40.59%\nclass Solution {\n public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {\n if (t1 != null && t2 != null) {\n TreeNode root = new TreeNode(t1.val + t2.val);\n root.left = mergeTrees(t1.left, t2.left);\n 
root.right = mergeTrees(t1.right, t2.right);\n return root;\n } else if (t1 == null) {\n return t2;\n } else {\n return t1;\n }\n }\n}\n\n# DFS\n# 6ms 98.05%\nclass Solution {\n public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {\n if (t1 == null)\n return t2;\n if (t2 == null)\n return t1;\n t1.val += t2.val;\n t1.left = mergeTrees(t1.left, t2.left);\n t1.right = mergeTrees(t1.right, t2.right);\n return t1;\n }\n}\n\n\n# BFS\n# 8ms 69.45%\nclass Solution {\n public TreeNode mergeTrees(TreeNode t1, TreeNode t2) {\n if (t1 == null)\n return t2;\n Stack < TreeNode[] > stack = new Stack < > ();\n stack.push(new TreeNode[] {t1, t2});\n while (!stack.isEmpty()) {\n TreeNode[] t = stack.pop();\n if (t[0] == null || t[1] == null) {\n continue;\n }\n t[0].val += t[1].val;\n if (t[0].left == null) {\n t[0].left = t[1].left;\n } else {\n stack.push(new TreeNode[] {t[0].left, t[1].left});\n }\n if (t[0].right == null) {\n t[0].right = t[1].right;\n } else {\n stack.push(new TreeNode[] {t[0].right, t[1].right});\n }\n }\n return t1;\n }\n}\n'''\n",
"step-ids": [
3,
4,
7,
8,
10
]
}
|
[
3,
4,
7,
8,
10
] |
def primo(num):
    """Report whether *num* is prime by trial division.

    Prints a Spanish-language verdict and returns True when *num* is
    prime, False otherwise.  Returning a bool is new but backward
    compatible: the original returned None and its caller (leerNumero)
    ignores the return value.
    """
    # Bug fix: the original guard was `num < 1`, so primo(1) skipped the
    # (empty) trial-division loop and was wrongly reported as prime.
    # Anything below 2 -- negatives, 0 and 1 -- is not prime.
    if num < 2:
        print(f"El numero {num} no es primo")
        return False
    # Trial division; for num == 2 the range is empty, so 2 falls
    # straight through to the "es primo" branch (same message the
    # original printed via its special case).
    for i in range(2, num):
        if num % i == 0:
            print(f"El numero {num} no es primo")
            return False
    print(f"El numero {num} es primo")
    return True
def leerNumero():
    """Prompt the user for an integer and run the primality check on it."""
    valor = int(input("Escribe un numero ==> "))
    primo(valor)
def main():
    """Script entry point: delegate to the interactive number reader."""
    leerNumero()
if __name__ =="__main__":
main()
|
normal
|
{
"blob_id": "29eb1a1642d38160c138733e269bb3ba0c5d4bba",
"index": 9834,
"step-1": "<mask token>\n\n\ndef leerNumero():\n numer = int(input('Escribe un numero ==> '))\n primo(numer)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef leerNumero():\n numer = int(input('Escribe un numero ==> '))\n primo(numer)\n\n\ndef main():\n leerNumero()\n\n\n<mask token>\n",
"step-3": "def primo(num):\n if num < 1:\n print(f'El numero {num} no es primo')\n return None\n elif num == 2:\n print(f'El numero {num} es primo')\n return None\n else:\n for i in range(2, num):\n if num % i == 0:\n print(f'El numero {num} no es primo')\n return None\n print(f'El numero {num} es primo')\n\n\ndef leerNumero():\n numer = int(input('Escribe un numero ==> '))\n primo(numer)\n\n\ndef main():\n leerNumero()\n\n\n<mask token>\n",
"step-4": "def primo(num):\n if num < 1:\n print(f'El numero {num} no es primo')\n return None\n elif num == 2:\n print(f'El numero {num} es primo')\n return None\n else:\n for i in range(2, num):\n if num % i == 0:\n print(f'El numero {num} no es primo')\n return None\n print(f'El numero {num} es primo')\n\n\ndef leerNumero():\n numer = int(input('Escribe un numero ==> '))\n primo(numer)\n\n\ndef main():\n leerNumero()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\ndef primo(num):\n if num < 1:\n print(f\"El numero {num} no es primo\")\n return None\n else:\n if num == 2:\n print(f\"El numero {num} es primo\")\n return None\n else:\n for i in range(2, num):\n if num % i == 0:\n print(f\"El numero {num} no es primo\")\n return None\n print(f\"El numero {num} es primo\") \n\n\ndef leerNumero():\n numer = int(input(\"Escribe un numero ==> \"))\n primo(numer)\n\n\ndef main():\n leerNumero()\n\n\nif __name__ ==\"__main__\":\n main() ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.apps import AppConfig
class EasyTechConfig(AppConfig):
    """Django application configuration for the ``easy_tech`` app."""
    # Dotted module path Django uses to locate this application.
    name = 'easy_tech'
|
normal
|
{
"blob_id": "0ef172ced411213c0f7daccd632f8d5ec97379c3",
"index": 5604,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass EasyTechConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass EasyTechConfig(AppConfig):\n name = 'easy_tech'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass EasyTechConfig(AppConfig):\n name = 'easy_tech'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import pandas as pd
import math
import sklearn
import sklearn.preprocessing
import datetime
import os
import matplotlib.pyplot as plt
import yfinance as yf
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score
import tensorflow
class simpleLSTM:
    """Stock-price prediction experiments with Keras LSTM models.

    Bundles dataset preparation (sliding-window samples, OHLCV feature
    selection, date-based train/val/test splits) with three model
    variants: an LSTM+Dense network (LSTM_CNN), a basic LSTM
    (basicLSTM) and a stateful LSTM (statefulLSTM).  Trained models are
    cached on disk and reloaded on subsequent runs.

    NOTE(review): methods assume ``stock_h`` is a DataFrame with a
    DatetimeIndex and Close/Open/Low/High/Volume columns (presumably a
    yfinance ``history()`` result) -- confirm against callers.
    """
    def __init__(self):
        # Raw string: path where the basic / LSTM_CNN models are saved
        # and reloaded from.
        self.MODEL_PATH = r"models\basic_lstm.h5"
    def create_dataset(self, dataset, look_back=4):
        """Build supervised (X, y) pairs from a time-ordered DataFrame.

        Each X sample is ``look_back`` consecutive rows; the matching y
        is the single row immediately after that window.  Returns two
        numpy arrays.
        """
        dataX, dataY = [], []
        for i in range(len(dataset) - look_back - 1):
            a = dataset.iloc[i:(i + look_back)]
            dataX.append(a)
            dataY.append(dataset.iloc[i + look_back])
            # dataY.append(dataset.iloc[i + look_back][0])
        return np.array(dataX), np.array(dataY)
    def get_features(self, stock_h, num_of_features=1):
        """Select a feature-column subset of the price history.

        Supported values of num_of_features: 1 (Close), 2 (+Open),
        4 (+Low/High) or 5 (+Volume).  NOTE(review): any other value
        (e.g. 3) leaves ``dataset`` unbound and raises UnboundLocalError.
        """
        if num_of_features == 1:
            dataset = stock_h[["Close"]]
        elif num_of_features == 2:
            dataset = stock_h[["Close", "Open"]]
        elif num_of_features == 4:
            dataset = stock_h[["Close", "Open", "Low", "High"]]
        elif num_of_features == 5:
            dataset = stock_h[["Close", "Open", "Low", "High", "Volume"]]
        return dataset
    def split_dataset(self, dataset, split_date, initial_data_cut=None, type="start"):
        """Split a DatetimeIndex-ed DataFrame at ``split_date``.

        Optionally trims the data first: with ``initial_data_cut`` set,
        ``type="start"`` keeps rows from that date on, ``type="end"``
        keeps rows up to it.  Returns (train, test); note both halves
        include the boundary row because .loc slicing is inclusive.
        """
        if initial_data_cut != None:
            split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
            if type == "start":
                dataset = dataset.loc[split_date_old:]
            if type == "end":
                dataset = dataset.loc[:split_date_old]
        split_date = pd.Timestamp(split_date + ' 00:00:00')
        train = dataset.loc[:split_date]
        test = dataset.loc[split_date:]
        # train_size = int(len(dataset) * 0.67)
        # test_size = len(dataset) - train_size
        # train = dataset[0:train_size, :]
        # test = dataset[train_size:len(dataset), :]
        # print(len(train), len(test))
        print(f"Train: {len(train)}, Test: {len(test)}")
        return train, test
    def LSTM_CNN(self, stock_h):
        """Train/evaluate a 4-feature LSTM (despite the name, the Conv1D
        layer is commented out), report R2 scores and plot predictions.

        Side effects: saves the model to self.MODEL_PATH and shows a
        matplotlib figure.
        """
        num_of_features = 4
        dataset = self.get_features(stock_h, num_of_features=num_of_features)
        # train, test = self.split_dataset(dataset, "2020-09-01", initial_data_cut="2020-01-01", type="start")
        # train, test = self.split_dataset(dataset, "2017-02-01")
        # val, test = self.split_dataset(test, "2021-01-01")
        # train, test = self.split_dataset(dataset, "2017-01-01", initial_data_cut="2019-01-01", type="end")
        train, test = self.split_dataset(dataset, "2019-01-01")
        train, val = self.split_dataset(train, "2014-01-01")
        # NOTE(review): batch_size is unused in this method.
        batch_size = 1
        look_back = 3
        EPOCHS = 100
        trainX, trainY = self.create_dataset(train, look_back)
        valX, valY = self.create_dataset(val, look_back)
        testX, testY = self.create_dataset(test, look_back)
        # Reshape windows to (samples, features, timesteps) for the LSTM.
        trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))
        valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))
        testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))
        # NOTE(review): this first early_stop is shadowed below and
        # never passed to fit() -- early stopping is effectively off.
        early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
        SAVE = False
        # It can be used to reconstruct the model identically.
        if os.path.exists(self.MODEL_PATH) and SAVE:
            model = tensorflow.keras.models.load_model(self.MODEL_PATH)
            print("[INFO] MODEL LOADED...")
        else:
            # input_shape = (look_back, 1)
            input_shape = (num_of_features, look_back)
            model = Sequential()
            model.add(
                LSTM(32, activation="relu", input_shape=input_shape))
            # model.add(
            #     Conv1D(filters=32, kernel_size=5, strides=1, padding="same", activation="relu",
            #            input_shape=input_shape))
            # lstm_model.add(Dropout(0.1))
            model.add(Dropout(0.2))
            # Output one value per input feature (multi-output regression).
            model.add(Dense(num_of_features, activation='relu'))
            model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
            early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
            # callbacks=[early_stop]
            history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1, validation_data=(valX, valY))
            model.save(self.MODEL_PATH)
            print("[INFO] MODEL SAVED...")
        trainPredict = model.predict(trainX)
        valPredict = model.predict(valX)
        testPredict = model.predict(testX)
        # testR2 = r2_score(testY[:, 0], testPredict[:, 0])
        # print('Test R2: %.2f ' % (testR2))
        # valR2 = r2_score(valY[:, 0], valPredict[:, 0])
        # print('Val R2: %.2f ' % (valR2))
        # trainR2 = r2_score(trainY[:, 0], trainPredict[:, 0])
        # print('Train R2: %.2f ' % (trainR2))
        # R2 computed across all features at once.
        testR2 = r2_score(testY, testPredict)
        print('Test R2: %.2f ' % (testR2))
        valR2 = r2_score(valY, valPredict)
        print('Val R2: %.2f ' % (valR2))
        trainR2 = r2_score(trainY, trainPredict)
        print('Train R2: %.2f ' % (trainR2))
        # Plot only the first feature (Close) for all three splits.
        feature_i = 0
        plt.plot(test.index[look_back+1:], testY[:, feature_i].ravel(), label="Test_obs")
        plt.plot(test.index[look_back+1:], testPredict[:, feature_i].ravel(), label="Test_pred")
        plt.plot(val.index[look_back+1:], valY[:, feature_i].ravel(), label="Val_obs")
        plt.plot(val.index[look_back+1:], valPredict[:, feature_i].ravel(), label="Val_pred")
        plt.plot(train.index[look_back+1:], trainY[:, feature_i].ravel(), label="Train_obs")
        plt.plot(train.index[look_back+1:], trainPredict[:, feature_i].ravel(), label="Train_pred")
        plt.xticks(rotation=45)
        plt.legend()
        plt.show()
    def basicLSTM(self, stock_h):
        """Train/evaluate a single-layer LSTM on 4 features with a
        single-value output, report R2 scores and plot test predictions.

        Side effects: saves/loads the model at self.MODEL_PATH and shows
        a matplotlib figure.
        """
        num_of_features = 4
        dataset = self.get_features(stock_h, num_of_features=num_of_features)
        train, test = self.split_dataset(dataset, "2016-01-01", initial_data_cut="2019-01-01")
        # train, test = self.split_dataset(dataset, "2018-01-01")
        train, val = self.split_dataset(train, "2012-01-01")
        look_back = 5
        trainX, trainY = self.create_dataset(train, look_back)
        valX, valY = self.create_dataset(val, look_back)
        testX, testY = self.create_dataset(test, look_back)
        # (samples, features, timesteps) layout, as in LSTM_CNN.
        trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))
        valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))
        testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))
        early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
        SAVE = True
        # Reuse the cached model when SAVE is enabled and a file exists.
        if os.path.exists(self.MODEL_PATH) and SAVE:
            model = tensorflow.keras.models.load_model(self.MODEL_PATH)
        else:
            model = Sequential()
            model.add(LSTM(32, input_shape=(num_of_features, look_back)))
            model.add(Dropout(0.3))
            model.add(Dense(1))
            model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
            model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2, validation_data=(valX, valY),
                      callbacks=[early_stop])
            model.save(self.MODEL_PATH)
        trainPredict = model.predict(trainX)
        testPredict = model.predict(testX)
        # trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
        # print('Train Score: %.2f RMSE' % (trainScore))
        # testScore = math.sqrt(mean_squared_error(testY, testPredict))
        # print('Test Score: %.2f RMSE' % (testScore))
        trainScore = r2_score(trainY, trainPredict)
        print('R2 Train Score: %.2f' % (trainScore))
        testScore = r2_score(testY, testPredict)
        print('R2 Test Score: %.2f' % (testScore))
        plt.plot(testY)
        plt.plot(testPredict)
        plt.show()
    def statefulLSTM(self, stock_h):
        """Train/evaluate a 2-layer stateful LSTM on the Close series,
        report RMSE and plot test predictions.

        Stateful layers require a fixed batch_input_shape and manual
        epoch looping (one fit() call per epoch, shuffle=False).
        NOTE(review): "models\\stateful_lstm.h5" is written without a raw
        string -- "\\s" is not a valid escape and only works by accident;
        should match the r"..." style used in __init__.
        """
        dataset = self.get_features(stock_h, num_of_features=1)
        # train, test = split_dataset(dataset, "2019-01-01", initial_data_cut="2018-01-01")
        train, test = self.split_dataset(dataset, "2017-01-01")
        val, test = self.split_dataset(test, "2019-01-01")
        batch_size = 1
        look_back = 3
        EPOCHS = 25
        trainX, trainY = self.create_dataset(train, look_back)
        valX, valY = self.create_dataset(val, look_back)
        testX, testY = self.create_dataset(test, look_back)
        # reshape input to be [samples, time steps, features]
        trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
        valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
        testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
        # trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
        # valX = np.reshape(valX, (valX.shape[0], 1, valX.shape[1]))
        # testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
        # NOTE(review): early_stop is created but never used here.
        early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
        # It can be used to reconstruct the model identically.
        if os.path.exists("models\stateful_lstm.h5"):
            model = tensorflow.keras.models.load_model("models\stateful_lstm.h5")
        else:
            model = Sequential()
            model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))
            model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))
            model.add(Dense(1))
            model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
            # Manual epoch loop so internal LSTM state carries within an
            # epoch; reset_states() between epochs was tried and disabled.
            for i in range(EPOCHS):
                print(f"[INFO] EPOCH: {i}/{EPOCHS}")
                model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False, validation_data=(valX, valY))
                # model.reset_states()
            model.save("models\stateful_lstm.h5")
            # model.save("stateful_lstm")
        # model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=2, validation_data=(valX, valY),
        #           callbacks=[early_stop])
        # Stateful models must predict with the same batch_size they
        # were built with.
        trainPredict = model.predict(trainX, batch_size=batch_size)
        # model.reset_states()
        testPredict = model.predict(testX, batch_size=batch_size)
        # trainPredict = model.predict(trainX)
        # testPredict = model.predict(testX)
        # trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
        # print('Train Score: %.2f RMSE' % (trainScore))
        # testScore = math.sqrt(mean_squared_error(testY, testPredict))
        # print('Test Score: %.2f RMSE' % (testScore))
        #
        trainScore = math.sqrt(mean_squared_error(trainY[:, 0], trainPredict[:, 0]))
        print('Train Score: %.2f RMSE' % (trainScore))
        testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:, 0]))
        print('Test Score: %.2f RMSE' % (testScore))
        plt.plot(testY)
        plt.plot(testPredict)
        plt.show()
        # # shift train predictions for plotting
        # trainPredictPlot = np.empty_like(dataset)
        # trainPredictPlot[:, :] = np.nan
        # trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
        # # shift test predictions for plotting
        # testPredictPlot = np.empty_like(dataset)
        # testPredictPlot[:, :] = np.nan
        # testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict
        # # plot baseline and predictions
        # # plt.plot(scaler.inverse_transform(dataset))
        # plt.plot(trainPredictPlot)
        # plt.plot(testPredictPlot)
        # plt.show()
|
normal
|
{
"blob_id": "97ea837961c92b5c92a93ec33ac016de7ff1e876",
"index": 2449,
"step-1": "<mask token>\n\n\nclass simpleLSTM:\n <mask token>\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:i + look_back]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n return np.array(dataX), np.array(dataY)\n <mask token>\n <mask token>\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2019-01-01')\n train, val = self.split_dataset(train, '2014-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = False\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print('[INFO] MODEL LOADED...')\n else:\n input_shape = num_of_features, look_back\n model = Sequential()\n model.add(LSTM(32, activation='relu', input_shape=input_shape))\n model.add(Dropout(0.2))\n model.add(Dense(num_of_features, activation='relu'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,\n validation_data=(valX, valY))\n model.save(self.MODEL_PATH)\n print('[INFO] MODEL SAVED...')\n trainPredict = model.predict(trainX)\n valPredict = model.predict(valX)\n testPredict = model.predict(testX)\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % testR2)\n valR2 = 
r2_score(valY, valPredict)\n print('Val R2: %.2f ' % valR2)\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % trainR2)\n feature_i = 0\n plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),\n label='Test_obs')\n plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].\n ravel(), label='Test_pred')\n plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),\n label='Val_obs')\n plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(\n ), label='Val_pred')\n plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),\n label='Train_obs')\n plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].\n ravel(), label='Train_pred')\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n <mask token>\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n train, test = self.split_dataset(dataset, '2017-01-01')\n val, test = self.split_dataset(test, '2019-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n if os.path.exists('models\\\\stateful_lstm.h5'):\n model = tensorflow.keras.models.load_model(\n 'models\\\\stateful_lstm.h5')\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n for i in range(EPOCHS):\n print(f'[INFO] EPOCH: {i}/{EPOCHS}')\n model.fit(trainX, 
trainY, epochs=1, batch_size=batch_size,\n verbose=2, shuffle=False, validation_data=(valX, valY))\n model.save('models\\\\stateful_lstm.h5')\n trainPredict = model.predict(trainX, batch_size=batch_size)\n testPredict = model.predict(testX, batch_size=batch_size)\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0],\n trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % trainScore)\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,\n 0]))\n print('Test Score: %.2f RMSE' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n",
"step-2": "<mask token>\n\n\nclass simpleLSTM:\n\n def __init__(self):\n self.MODEL_PATH = 'models\\\\basic_lstm.h5'\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:i + look_back]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n return np.array(dataX), np.array(dataY)\n <mask token>\n\n def split_dataset(self, dataset, split_date, initial_data_cut=None,\n type='start'):\n if initial_data_cut != None:\n split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')\n if type == 'start':\n dataset = dataset.loc[split_date_old:]\n if type == 'end':\n dataset = dataset.loc[:split_date_old]\n split_date = pd.Timestamp(split_date + ' 00:00:00')\n train = dataset.loc[:split_date]\n test = dataset.loc[split_date:]\n print(f'Train: {len(train)}, Test: {len(test)}')\n return train, test\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2019-01-01')\n train, val = self.split_dataset(train, '2014-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = False\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print('[INFO] MODEL LOADED...')\n else:\n input_shape = num_of_features, look_back\n model = Sequential()\n model.add(LSTM(32, activation='relu', input_shape=input_shape))\n model.add(Dropout(0.2))\n 
model.add(Dense(num_of_features, activation='relu'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,\n validation_data=(valX, valY))\n model.save(self.MODEL_PATH)\n print('[INFO] MODEL SAVED...')\n trainPredict = model.predict(trainX)\n valPredict = model.predict(valX)\n testPredict = model.predict(testX)\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % testR2)\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % valR2)\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % trainR2)\n feature_i = 0\n plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),\n label='Test_obs')\n plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].\n ravel(), label='Test_pred')\n plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),\n label='Val_obs')\n plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(\n ), label='Val_pred')\n plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),\n label='Train_obs')\n plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].\n ravel(), label='Train_pred')\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\n def basicLSTM(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2016-01-01',\n initial_data_cut='2019-01-01')\n train, val = self.split_dataset(train, '2012-01-01')\n look_back = 5\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n 
shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = True\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n else:\n model = Sequential()\n model.add(LSTM(32, input_shape=(num_of_features, look_back)))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,\n validation_data=(valX, valY), callbacks=[early_stop])\n model.save(self.MODEL_PATH)\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n trainScore = r2_score(trainY, trainPredict)\n print('R2 Train Score: %.2f' % trainScore)\n testScore = r2_score(testY, testPredict)\n print('R2 Test Score: %.2f' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n train, test = self.split_dataset(dataset, '2017-01-01')\n val, test = self.split_dataset(test, '2019-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n if os.path.exists('models\\\\stateful_lstm.h5'):\n model = tensorflow.keras.models.load_model(\n 'models\\\\stateful_lstm.h5')\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', 
optimizer='adam',\n metrics=['accuracy'])\n for i in range(EPOCHS):\n print(f'[INFO] EPOCH: {i}/{EPOCHS}')\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size,\n verbose=2, shuffle=False, validation_data=(valX, valY))\n model.save('models\\\\stateful_lstm.h5')\n trainPredict = model.predict(trainX, batch_size=batch_size)\n testPredict = model.predict(testX, batch_size=batch_size)\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0],\n trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % trainScore)\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,\n 0]))\n print('Test Score: %.2f RMSE' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n",
"step-3": "<mask token>\n\n\nclass simpleLSTM:\n\n def __init__(self):\n self.MODEL_PATH = 'models\\\\basic_lstm.h5'\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:i + look_back]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n return np.array(dataX), np.array(dataY)\n\n def get_features(self, stock_h, num_of_features=1):\n if num_of_features == 1:\n dataset = stock_h[['Close']]\n elif num_of_features == 2:\n dataset = stock_h[['Close', 'Open']]\n elif num_of_features == 4:\n dataset = stock_h[['Close', 'Open', 'Low', 'High']]\n elif num_of_features == 5:\n dataset = stock_h[['Close', 'Open', 'Low', 'High', 'Volume']]\n return dataset\n\n def split_dataset(self, dataset, split_date, initial_data_cut=None,\n type='start'):\n if initial_data_cut != None:\n split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')\n if type == 'start':\n dataset = dataset.loc[split_date_old:]\n if type == 'end':\n dataset = dataset.loc[:split_date_old]\n split_date = pd.Timestamp(split_date + ' 00:00:00')\n train = dataset.loc[:split_date]\n test = dataset.loc[split_date:]\n print(f'Train: {len(train)}, Test: {len(test)}')\n return train, test\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2019-01-01')\n train, val = self.split_dataset(train, '2014-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', 
patience=1, verbose=1)\n SAVE = False\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print('[INFO] MODEL LOADED...')\n else:\n input_shape = num_of_features, look_back\n model = Sequential()\n model.add(LSTM(32, activation='relu', input_shape=input_shape))\n model.add(Dropout(0.2))\n model.add(Dense(num_of_features, activation='relu'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,\n validation_data=(valX, valY))\n model.save(self.MODEL_PATH)\n print('[INFO] MODEL SAVED...')\n trainPredict = model.predict(trainX)\n valPredict = model.predict(valX)\n testPredict = model.predict(testX)\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % testR2)\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % valR2)\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % trainR2)\n feature_i = 0\n plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),\n label='Test_obs')\n plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].\n ravel(), label='Test_pred')\n plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),\n label='Val_obs')\n plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(\n ), label='Val_pred')\n plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),\n label='Train_obs')\n plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].\n ravel(), label='Train_pred')\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\n def basicLSTM(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2016-01-01',\n initial_data_cut='2019-01-01')\n train, val = self.split_dataset(train, '2012-01-01')\n look_back = 5\n trainX, trainY = self.create_dataset(train, 
look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = True\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n else:\n model = Sequential()\n model.add(LSTM(32, input_shape=(num_of_features, look_back)))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,\n validation_data=(valX, valY), callbacks=[early_stop])\n model.save(self.MODEL_PATH)\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n trainScore = r2_score(trainY, trainPredict)\n print('R2 Train Score: %.2f' % trainScore)\n testScore = r2_score(testY, testPredict)\n print('R2 Test Score: %.2f' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n train, test = self.split_dataset(dataset, '2017-01-01')\n val, test = self.split_dataset(test, '2019-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n if os.path.exists('models\\\\stateful_lstm.h5'):\n model = 
tensorflow.keras.models.load_model(\n 'models\\\\stateful_lstm.h5')\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n for i in range(EPOCHS):\n print(f'[INFO] EPOCH: {i}/{EPOCHS}')\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size,\n verbose=2, shuffle=False, validation_data=(valX, valY))\n model.save('models\\\\stateful_lstm.h5')\n trainPredict = model.predict(trainX, batch_size=batch_size)\n testPredict = model.predict(testX, batch_size=batch_size)\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0],\n trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % trainScore)\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,\n 0]))\n print('Test Score: %.2f RMSE' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport math\nimport sklearn\nimport sklearn.preprocessing\nimport datetime\nimport os\nimport matplotlib.pyplot as plt\nimport yfinance as yf\nimport math\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport tensorflow\n\n\nclass simpleLSTM:\n\n def __init__(self):\n self.MODEL_PATH = 'models\\\\basic_lstm.h5'\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:i + look_back]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n return np.array(dataX), np.array(dataY)\n\n def get_features(self, stock_h, num_of_features=1):\n if num_of_features == 1:\n dataset = stock_h[['Close']]\n elif num_of_features == 2:\n dataset = stock_h[['Close', 'Open']]\n elif num_of_features == 4:\n dataset = stock_h[['Close', 'Open', 'Low', 'High']]\n elif num_of_features == 5:\n dataset = stock_h[['Close', 'Open', 'Low', 'High', 'Volume']]\n return dataset\n\n def split_dataset(self, dataset, split_date, initial_data_cut=None,\n type='start'):\n if initial_data_cut != None:\n split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')\n if type == 'start':\n dataset = dataset.loc[split_date_old:]\n if type == 'end':\n dataset = dataset.loc[:split_date_old]\n split_date = pd.Timestamp(split_date + ' 00:00:00')\n train = dataset.loc[:split_date]\n test = dataset.loc[split_date:]\n print(f'Train: {len(train)}, Test: {len(test)}')\n return train, test\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2019-01-01')\n train, val = self.split_dataset(train, 
'2014-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = False\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print('[INFO] MODEL LOADED...')\n else:\n input_shape = num_of_features, look_back\n model = Sequential()\n model.add(LSTM(32, activation='relu', input_shape=input_shape))\n model.add(Dropout(0.2))\n model.add(Dense(num_of_features, activation='relu'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,\n validation_data=(valX, valY))\n model.save(self.MODEL_PATH)\n print('[INFO] MODEL SAVED...')\n trainPredict = model.predict(trainX)\n valPredict = model.predict(valX)\n testPredict = model.predict(testX)\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % testR2)\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % valR2)\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % trainR2)\n feature_i = 0\n plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),\n label='Test_obs')\n plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].\n ravel(), label='Test_pred')\n plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),\n label='Val_obs')\n plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(\n ), label='Val_pred')\n plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),\n 
label='Train_obs')\n plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].\n ravel(), label='Train_pred')\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\n def basicLSTM(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2016-01-01',\n initial_data_cut='2019-01-01')\n train, val = self.split_dataset(train, '2012-01-01')\n look_back = 5\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = True\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n else:\n model = Sequential()\n model.add(LSTM(32, input_shape=(num_of_features, look_back)))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,\n validation_data=(valX, valY), callbacks=[early_stop])\n model.save(self.MODEL_PATH)\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n trainScore = r2_score(trainY, trainPredict)\n print('R2 Train Score: %.2f' % trainScore)\n testScore = r2_score(testY, testPredict)\n print('R2 Test Score: %.2f' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n train, test = self.split_dataset(dataset, '2017-01-01')\n val, test = self.split_dataset(test, '2019-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 
25\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n if os.path.exists('models\\\\stateful_lstm.h5'):\n model = tensorflow.keras.models.load_model(\n 'models\\\\stateful_lstm.h5')\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n for i in range(EPOCHS):\n print(f'[INFO] EPOCH: {i}/{EPOCHS}')\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size,\n verbose=2, shuffle=False, validation_data=(valX, valY))\n model.save('models\\\\stateful_lstm.h5')\n trainPredict = model.predict(trainX, batch_size=batch_size)\n testPredict = model.predict(testX, batch_size=batch_size)\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0],\n trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % trainScore)\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,\n 0]))\n print('Test Score: %.2f RMSE' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport math\nimport sklearn\nimport sklearn.preprocessing\nimport datetime\nimport os\nimport matplotlib.pyplot as plt\nimport yfinance as yf\n\nimport math\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport tensorflow\n\n\nclass simpleLSTM:\n def __init__(self):\n self.MODEL_PATH = r\"models\\basic_lstm.h5\"\n\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:(i + look_back)]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n # dataY.append(dataset.iloc[i + look_back][0])\n return np.array(dataX), np.array(dataY)\n\n\n def get_features(self, stock_h, num_of_features=1):\n if num_of_features == 1:\n dataset = stock_h[[\"Close\"]]\n elif num_of_features == 2:\n dataset = stock_h[[\"Close\", \"Open\"]]\n elif num_of_features == 4:\n dataset = stock_h[[\"Close\", \"Open\", \"Low\", \"High\"]]\n elif num_of_features == 5:\n dataset = stock_h[[\"Close\", \"Open\", \"Low\", \"High\", \"Volume\"]]\n return dataset\n\n def split_dataset(self, dataset, split_date, initial_data_cut=None, type=\"start\"):\n if initial_data_cut != None:\n split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')\n if type == \"start\":\n dataset = dataset.loc[split_date_old:]\n if type == \"end\":\n dataset = dataset.loc[:split_date_old]\n\n split_date = pd.Timestamp(split_date + ' 00:00:00')\n train = dataset.loc[:split_date]\n test = dataset.loc[split_date:]\n\n # train_size = int(len(dataset) * 0.67)\n # test_size = len(dataset) - train_size\n # train = dataset[0:train_size, :]\n # test = dataset[train_size:len(dataset), :]\n # print(len(train), len(test))\n print(f\"Train: 
{len(train)}, Test: {len(test)}\")\n return train, test\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n # train, test = self.split_dataset(dataset, \"2020-09-01\", initial_data_cut=\"2020-01-01\", type=\"start\")\n # train, test = self.split_dataset(dataset, \"2017-02-01\")\n # val, test = self.split_dataset(test, \"2021-01-01\")\n # train, test = self.split_dataset(dataset, \"2017-01-01\", initial_data_cut=\"2019-01-01\", type=\"end\")\n train, test = self.split_dataset(dataset, \"2019-01-01\")\n train, val = self.split_dataset(train, \"2014-01-01\")\n\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))\n\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n\n SAVE = False\n # It can be used to reconstruct the model identically.\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print(\"[INFO] MODEL LOADED...\")\n else:\n # input_shape = (look_back, 1)\n input_shape = (num_of_features, look_back)\n model = Sequential()\n model.add(\n LSTM(32, activation=\"relu\", input_shape=input_shape))\n # model.add(\n # Conv1D(filters=32, kernel_size=5, strides=1, padding=\"same\", activation=\"relu\",\n # input_shape=input_shape))\n # lstm_model.add(Dropout(0.1))\n model.add(Dropout(0.2))\n\n model.add(Dense(num_of_features, activation='relu'))\n\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n # callbacks=[early_stop]\n history = 
model.fit(trainX, trainY, epochs=EPOCHS, verbose=1, validation_data=(valX, valY))\n\n model.save(self.MODEL_PATH)\n print(\"[INFO] MODEL SAVED...\")\n\n trainPredict = model.predict(trainX)\n\n valPredict = model.predict(valX)\n\n testPredict = model.predict(testX)\n\n\n # testR2 = r2_score(testY[:, 0], testPredict[:, 0])\n # print('Test R2: %.2f ' % (testR2))\n # valR2 = r2_score(valY[:, 0], valPredict[:, 0])\n # print('Val R2: %.2f ' % (valR2))\n # trainR2 = r2_score(trainY[:, 0], trainPredict[:, 0])\n # print('Train R2: %.2f ' % (trainR2))\n\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % (testR2))\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % (valR2))\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % (trainR2))\n\n feature_i = 0\n\n plt.plot(test.index[look_back+1:], testY[:, feature_i].ravel(), label=\"Test_obs\")\n plt.plot(test.index[look_back+1:], testPredict[:, feature_i].ravel(), label=\"Test_pred\")\n plt.plot(val.index[look_back+1:], valY[:, feature_i].ravel(), label=\"Val_obs\")\n plt.plot(val.index[look_back+1:], valPredict[:, feature_i].ravel(), label=\"Val_pred\")\n plt.plot(train.index[look_back+1:], trainY[:, feature_i].ravel(), label=\"Train_obs\")\n plt.plot(train.index[look_back+1:], trainPredict[:, feature_i].ravel(), label=\"Train_pred\")\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\n def basicLSTM(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, \"2016-01-01\", initial_data_cut=\"2019-01-01\")\n # train, test = self.split_dataset(dataset, \"2018-01-01\")\n train, val = self.split_dataset(train, \"2012-01-01\")\n\n look_back = 5\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))\n 
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))\n\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n\n SAVE = True\n\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n else:\n model = Sequential()\n model.add(LSTM(32, input_shape=(num_of_features, look_back)))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])\n\n model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2, validation_data=(valX, valY),\n callbacks=[early_stop])\n\n model.save(self.MODEL_PATH)\n\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n\n # trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))\n # print('Train Score: %.2f RMSE' % (trainScore))\n # testScore = math.sqrt(mean_squared_error(testY, testPredict))\n # print('Test Score: %.2f RMSE' % (testScore))\n\n\n trainScore = r2_score(trainY, trainPredict)\n print('R2 Train Score: %.2f' % (trainScore))\n testScore = r2_score(testY, testPredict)\n print('R2 Test Score: %.2f' % (testScore))\n\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n # train, test = split_dataset(dataset, \"2019-01-01\", initial_data_cut=\"2018-01-01\")\n train, test = self.split_dataset(dataset, \"2017-01-01\")\n val, test = self.split_dataset(test, \"2019-01-01\")\n\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n\n # reshape input to be [samples, time steps, features]\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = 
np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n\n # trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n # valX = np.reshape(valX, (valX.shape[0], 1, valX.shape[1]))\n # testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n\n\n # It can be used to reconstruct the model identically.\n if os.path.exists(\"models\\stateful_lstm.h5\"):\n model = tensorflow.keras.models.load_model(\"models\\stateful_lstm.h5\")\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])\n\n for i in range(EPOCHS):\n print(f\"[INFO] EPOCH: {i}/{EPOCHS}\")\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False, validation_data=(valX, valY))\n # model.reset_states()\n\n model.save(\"models\\stateful_lstm.h5\")\n # model.save(\"stateful_lstm\")\n\n # model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=2, validation_data=(valX, valY),\n # callbacks=[early_stop])\n trainPredict = model.predict(trainX, batch_size=batch_size)\n # model.reset_states()\n testPredict = model.predict(testX, batch_size=batch_size)\n\n # trainPredict = model.predict(trainX)\n # testPredict = model.predict(testX)\n\n # trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))\n # print('Train Score: %.2f RMSE' % (trainScore))\n # testScore = math.sqrt(mean_squared_error(testY, testPredict))\n # print('Test Score: %.2f RMSE' % (testScore))\n #\n\n\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0], trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % (trainScore))\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:, 0]))\n print('Test Score: %.2f RMSE' % (testScore))\n\n 
plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n # # shift train predictions for plotting\n # trainPredictPlot = np.empty_like(dataset)\n # trainPredictPlot[:, :] = np.nan\n # trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict\n # # shift test predictions for plotting\n # testPredictPlot = np.empty_like(dataset)\n # testPredictPlot[:, :] = np.nan\n # testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict\n # # plot baseline and predictions\n # # plt.plot(scaler.inverse_transform(dataset))\n # plt.plot(trainPredictPlot)\n # plt.plot(testPredictPlot)\n # plt.show()\n\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
def group_list(request):
    """Render the list of all groups."""
    # Hand the full Group queryset straight to the list template.
    context = {'groups': Group.objects.all()}
    return render(request, 'group_list.html', context)
<|reserved_special_token_0|>
def group_add(request):
    """Create a new Group from a submitted GroupForm.

    GET shows an empty form; a valid POST creates the group and redirects
    to its detail page; an invalid POST redisplays the bound form.
    """
    if request.method != 'POST':
        # Initial visit: render an unbound form.
        return render(request, 'group_edit.html', {'form': GroupForm()})
    form = GroupForm(request.POST)
    if form.is_valid():
        group = form.save(commit=False)
        # group.add(...) is a project-defined model method; presumably it
        # persists the row and records the author — TODO confirm.
        group.add(group.group_name, request.user)
        return redirect('group_detail', pk=group.pk)
    # Invalid submission: redisplay the bound form (with its errors).
    return render(request, 'group_edit.html', {'form': form})
<|reserved_special_token_0|>
def group_truncate(request):
    """Delete every Group row, then render the (now empty) group list."""
    # Deliberately delete one instance at a time: a bulk queryset delete
    # would bypass any custom Group.delete() logic and per-instance signals.
    for group in Group.objects.all():
        group.delete()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
<|reserved_special_token_0|>
def group_checkout(request, pk):
    """Take a Group for editing (check it out) on behalf of request.user.

    If another user already holds the checkout, only report that and leave
    the record untouched; otherwise (free, or held by this same user)
    perform/refresh the checkout.
    """
    group = get_object_or_404(Group, pk=pk)
    # checked_out_by holds a username string (it is compared against
    # request.user.username here and in group_checkin, and printed directly).
    held_by_other = (group.checked_out_by is not None and
                     group.checked_out_by != request.user.username)
    if held_by_other:
        # Someone else holds the lock; report it and do nothing.
        print('Взято на изменение пользователем %s' % group.checked_out_by)
    else:
        # BUG FIX: the original compared checked_out_by (a username string)
        # against the User object itself, which could never be equal, so a
        # repeat checkout by the current holder was silently skipped.
        group.checkout(group.group_name, request.user)
    return render(request, 'group_detail.html', {'group': group})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def group_list(request):
    """Display every Group in the list template."""
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
<|reserved_special_token_0|>
def group_add(request):
    """Handle creation of a Group via GroupForm (GET shows, POST saves)."""
    if request.method == 'POST':
        form = GroupForm(request.POST)
        if not form.is_valid():
            # Bound form with errors falls through to the render below.
            return render(request, 'group_edit.html', {'form': form})
        group = form.save(commit=False)
        # Project-defined model method; presumably persists the group and
        # records the creating user — TODO confirm.
        group.add(group.group_name, request.user)
        return redirect('group_detail', pk=group.pk)
    return render(request, 'group_edit.html', {'form': GroupForm()})
<|reserved_special_token_0|>
def group_import(request):
    """Import groups from the Oracle source into the local Group table.

    Each source row is (group_name, parent_name, strg_period,
    strg_period_type); the parent is resolved by name against groups already
    present, so a parent not yet imported resolves to None — TODO confirm the
    source query orders parents before children.
    """
    # SECURITY: credentials are hard-coded here; move them to settings/env.
    con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')
    try:
        grpSQL = SQLlist.objects.filter(sql_name='Group').first(
            ).get_checkout_sql_all()
        grpCur = con.get_cursor(grpSQL)
        for rec in con.get_data(grpCur):
            Group.objects.create(
                group_name=rec[0],
                parent=Group.objects.filter(group_name=rec[1]).first(),
                strg_period=rec[2],
                strg_period_type=rec[3])
    finally:
        # FIX: the original closed the connection only on the happy path;
        # any exception mid-import leaked the Oracle connection.
        con.close()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_truncate(request):
    """Delete every Group row, then render the (now empty) list page."""
    # Per-instance delete() is kept deliberately so any model-level delete
    # behavior still runs for each row (bulk queryset delete would skip it).
    for group in Group.objects.all():
        group.delete()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_checkin(request, pk):
    """Check a Group back in: push its current state to Oracle, release the
    checkout, and remove the local row when the group is flagged deleted.

    Only the user who checked the record out may check it in; violations
    are reported via print(), matching the rest of this module.
    """
    group = get_object_or_404(Group, pk=pk)
    try:
        # The record must be checked out, and by the current user.
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if (group.checked_out_by is not None and group.checked_out_by !=
            request.user.username):
            raise NameError('OtherCheckoutUser')
        # Build the check-in SQL from the stored 'Group' template; the
        # parent name becomes '' for a root group.
        # NOTE(review): plain %-interpolation into SQL is injection-prone --
        # prefer bind parameters if OraConnection supports them.
        if group.parent is None:
            grpSQL = SQLlist.objects.filter(sql_name='Group').first(
                ).get_checkin_sql() % (group.group_name, '', IfNoneThenNull
                (group.strg_period), IfNoneThenNull(group.strg_period_type,
                ''), group.deleted)
        else:
            grpSQL = SQLlist.objects.filter(sql_name='Group').first(
                ).get_checkin_sql() % (group.group_name, group.parent.
                group_name, IfNoneThenNull(group.strg_period),
                IfNoneThenNull(group.strg_period_type, ''), group.deleted)
        try:
            # NOTE(review): hard-coded Oracle credentials, and the connection
            # is never closed on this path -- consider try/finally.
            con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')
            con.exec(grpSQL)
            group.checkin(group.group_name)
        except NameError as err:
            print(err)
    except NameError as err:
        # Report the checkout-state violations raised above.
        if err.args[0] == 'OtherCheckoutUser':
            print('Взято на изменение пользователем %s' % group.checked_out_by)
        elif err.args[0] == 'NoneCheckoutUser':
            print('Сначала необходимо взять на изменение')
        else:
            print(err)
    # A group flagged deleted is also removed locally after check-in.
    if group.deleted == 1:
        group.delete()
        groups = Group.objects.all()
        return render(request, 'group_list.html', {'groups': groups})
    else:
        return redirect('group_detail', pk=pk)
def group_checkout(request, pk):
    """Mark the Group identified by *pk* as checked out by the current user.

    Checkout succeeds only when the record is free or already held by the
    same user; otherwise the current holder is reported via print()
    (matching the rest of this module) and nothing changes.
    """
    group = get_object_or_404(Group, pk=pk)
    # Held by somebody else: report and fall through without touching it.
    if (group.checked_out_by is not None and group.checked_out_by !=
            request.user.username):
        print('Взято на изменение пользователем %s' % group.checked_out_by)
    # BUG FIX: checked_out_by stores a username string (the comparison above
    # and those in group_checkin/group_del use request.user.username), so the
    # original `== request.user` (a User object) could never match, making
    # re-checkout by the current holder unreachable.
    if (group.checked_out_by is None or
            group.checked_out_by == request.user.username):
        group.checkout(group.group_name, request.user)
    return render(request, 'group_detail.html', {'group': group})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def group_list(request):
    """Render the list page showing every Group."""
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_detail(request, pk):
    """Render the detail page for the Group identified by *pk* (404 if absent)."""
    grp = get_object_or_404(Group, pk=pk)
    return render(request, 'group_detail.html', {'group': grp})
def group_add(request):
    """Create a new Group from a submitted GroupForm.

    GET (or an invalid POST) renders the edit form; a valid POST delegates
    persistence to Group.add() and redirects to the new group's detail page.
    """
    if request.method != 'POST':
        return render(request, 'group_edit.html', {'form': GroupForm()})
    form = GroupForm(request.POST)
    if form.is_valid():
        group = form.save(commit=False)
        group.add(group.group_name, request.user)
        return redirect('group_detail', pk=group.pk)
    # Invalid submission: re-render with the bound form so errors show.
    return render(request, 'group_edit.html', {'form': form})
<|reserved_special_token_0|>
def group_del(request, pk):
    """Mark the Group identified by *pk* as removed.

    The record must be checked out by the current user; otherwise the
    violation is reported via print() and nothing is changed.
    """
    grp = get_object_or_404(Group, pk=pk)
    try:
        if grp.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if grp.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        grp.remove(grp.group_name)
    except NameError as exc:
        code = exc.args[0]
        if code == 'OtherCheckoutUser':
            print('"%s" Взято на изменение пользователем %s'
                  % (grp.group_name, grp.checked_out_by))
        elif code == 'NoneCheckoutUser':
            print('"%s" сначала необходимо взять на изменение' % grp.group_name)
    return render(request, 'group_detail.html', {'group': grp})
def group_import(request):
    """Import all groups from Oracle into the local Group table.

    Runs the stored 'Group' checkout SQL, creates one local Group per result
    row, and renders the refreshed list page.
    """
    # SECURITY NOTE(review): credentials are hard-coded; move them to
    # settings/environment configuration.
    con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')
    try:
        grpSQL = SQLlist.objects.filter(sql_name='Group').first(
            ).get_checkout_sql_all()
        grpCur = con.get_cursor(grpSQL)
        for rec in con.get_data(grpCur):
            # rec layout (from the stored SQL): group_name, parent name,
            # storage period, storage period type -- TODO confirm.
            Group.objects.create(
                group_name=rec[0],
                parent=Group.objects.filter(group_name=rec[1]).first(),
                strg_period=rec[2],
                strg_period_type=rec[3])
    finally:
        # BUG FIX: close the Oracle connection even when the import fails
        # part-way; previously an exception here leaked the connection.
        con.close()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_truncate(request):
    """Delete every Group row, then render the (now empty) list page."""
    # Per-instance delete() is kept deliberately so any model-level delete
    # behavior still runs for each row (bulk queryset delete would skip it).
    for group in Group.objects.all():
        group.delete()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_checkin(request, pk):
    """Check a Group back in: push its current state to Oracle, release the
    checkout, and remove the local row when the group is flagged deleted.

    Only the user who checked the record out may check it in; violations
    are reported via print(), matching the rest of this module.
    """
    group = get_object_or_404(Group, pk=pk)
    try:
        # The record must be checked out, and by the current user.
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if (group.checked_out_by is not None and group.checked_out_by !=
            request.user.username):
            raise NameError('OtherCheckoutUser')
        # Build the check-in SQL from the stored 'Group' template; the
        # parent name becomes '' for a root group.
        # NOTE(review): plain %-interpolation into SQL is injection-prone --
        # prefer bind parameters if OraConnection supports them.
        if group.parent is None:
            grpSQL = SQLlist.objects.filter(sql_name='Group').first(
                ).get_checkin_sql() % (group.group_name, '', IfNoneThenNull
                (group.strg_period), IfNoneThenNull(group.strg_period_type,
                ''), group.deleted)
        else:
            grpSQL = SQLlist.objects.filter(sql_name='Group').first(
                ).get_checkin_sql() % (group.group_name, group.parent.
                group_name, IfNoneThenNull(group.strg_period),
                IfNoneThenNull(group.strg_period_type, ''), group.deleted)
        try:
            # NOTE(review): hard-coded Oracle credentials, and the connection
            # is never closed on this path -- consider try/finally.
            con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')
            con.exec(grpSQL)
            group.checkin(group.group_name)
        except NameError as err:
            print(err)
    except NameError as err:
        # Report the checkout-state violations raised above.
        if err.args[0] == 'OtherCheckoutUser':
            print('Взято на изменение пользователем %s' % group.checked_out_by)
        elif err.args[0] == 'NoneCheckoutUser':
            print('Сначала необходимо взять на изменение')
        else:
            print(err)
    # A group flagged deleted is also removed locally after check-in.
    if group.deleted == 1:
        group.delete()
        groups = Group.objects.all()
        return render(request, 'group_list.html', {'groups': groups})
    else:
        return redirect('group_detail', pk=pk)
def group_checkout(request, pk):
    """Mark the Group identified by *pk* as checked out by the current user.

    Checkout succeeds only when the record is free or already held by the
    same user; otherwise the current holder is reported via print()
    (matching the rest of this module) and nothing changes.
    """
    group = get_object_or_404(Group, pk=pk)
    # Held by somebody else: report and fall through without touching it.
    if (group.checked_out_by is not None and group.checked_out_by !=
            request.user.username):
        print('Взято на изменение пользователем %s' % group.checked_out_by)
    # BUG FIX: checked_out_by stores a username string (the comparison above
    # and those in group_checkin/group_del use request.user.username), so the
    # original `== request.user` (a User object) could never match, making
    # re-checkout by the current holder unreachable.
    if (group.checked_out_by is None or
            group.checked_out_by == request.user.username):
        group.checkout(group.group_name, request.user)
    return render(request, 'group_detail.html', {'group': group})
<|reserved_special_token_1|>
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Group, SQLlist
from .forms import GroupForm
from .oraConnect import *
from .utils import IfNoneThenNull
def group_list(request):
    """Render the list page showing every Group."""
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_detail(request, pk):
    """Render the detail page for the Group identified by *pk* (404 if absent)."""
    grp = get_object_or_404(Group, pk=pk)
    return render(request, 'group_detail.html', {'group': grp})
def group_add(request):
    """Create a new Group from a submitted GroupForm.

    GET (or an invalid POST) renders the edit form; a valid POST delegates
    persistence to Group.add() and redirects to the new group's detail page.
    """
    if request.method != 'POST':
        return render(request, 'group_edit.html', {'form': GroupForm()})
    form = GroupForm(request.POST)
    if form.is_valid():
        group = form.save(commit=False)
        group.add(group.group_name, request.user)
        return redirect('group_detail', pk=group.pk)
    # Invalid submission: re-render with the bound form so errors show.
    return render(request, 'group_edit.html', {'form': form})
def group_edit(request, pk):
    """Edit an existing Group via GroupForm.

    The record must be checked out by the current user; violations are
    reported via print() (matching the rest of this module) and redirect
    back to the detail page.
    """
    group = get_object_or_404(Group, pk=pk)
    try:
        # The record must be checked out, and by the current user.
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if group.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        # BUG FIX: removed a dead `grpSQL = ...get_checkin_sql() % ...`
        # assignment copy-pasted from group_checkin -- its result was never
        # used here and it only risked failing before the form was shown.
        if request.method == 'POST':
            form = GroupForm(request.POST, instance=group)
            if form.is_valid():
                group = form.save(commit=False)
                group.edit(group.group_name, request.user)
                return redirect('group_detail', pk=group.pk)
        else:
            form = GroupForm(instance=group)
        return render(request, 'group_edit.html', {'form': form})
    except NameError as err:
        if err.args[0] == 'OtherCheckoutUser':
            print('"%s" Взято на изменение пользователем %s'
                  % (group.group_name, group.checked_out_by))
        elif err.args[0] == 'NoneCheckoutUser':
            print('"%s" сначала необходимо взять на изменение' % group.group_name)
        return redirect('group_detail', pk=group.pk)
def group_del(request, pk):
    """Mark the Group identified by *pk* as removed.

    The record must be checked out by the current user; otherwise the
    violation is reported via print() and nothing is changed.
    """
    grp = get_object_or_404(Group, pk=pk)
    try:
        if grp.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if grp.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        grp.remove(grp.group_name)
    except NameError as exc:
        code = exc.args[0]
        if code == 'OtherCheckoutUser':
            print('"%s" Взято на изменение пользователем %s'
                  % (grp.group_name, grp.checked_out_by))
        elif code == 'NoneCheckoutUser':
            print('"%s" сначала необходимо взять на изменение' % grp.group_name)
    return render(request, 'group_detail.html', {'group': grp})
def group_import(request):
    """Import all groups from Oracle into the local Group table.

    Runs the stored 'Group' checkout SQL, creates one local Group per result
    row, and renders the refreshed list page.
    """
    # SECURITY NOTE(review): credentials are hard-coded; move them to
    # settings/environment configuration.
    con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')
    try:
        grpSQL = SQLlist.objects.filter(sql_name='Group').first(
            ).get_checkout_sql_all()
        grpCur = con.get_cursor(grpSQL)
        for rec in con.get_data(grpCur):
            # rec layout (from the stored SQL): group_name, parent name,
            # storage period, storage period type -- TODO confirm.
            Group.objects.create(
                group_name=rec[0],
                parent=Group.objects.filter(group_name=rec[1]).first(),
                strg_period=rec[2],
                strg_period_type=rec[3])
    finally:
        # BUG FIX: close the Oracle connection even when the import fails
        # part-way; previously an exception here leaked the connection.
        con.close()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_truncate(request):
    """Delete every Group row, then render the (now empty) list page."""
    # Per-instance delete() is kept deliberately so any model-level delete
    # behavior still runs for each row (bulk queryset delete would skip it).
    for group in Group.objects.all():
        group.delete()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_checkin(request, pk):
    """Check a Group back in: push its current state to Oracle, release the
    checkout, and remove the local row when the group is flagged deleted.

    Only the user who checked the record out may check it in; violations
    are reported via print(), matching the rest of this module.
    """
    group = get_object_or_404(Group, pk=pk)
    try:
        # The record must be checked out, and by the current user.
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if (group.checked_out_by is not None and group.checked_out_by !=
            request.user.username):
            raise NameError('OtherCheckoutUser')
        # Build the check-in SQL from the stored 'Group' template; the
        # parent name becomes '' for a root group.
        # NOTE(review): plain %-interpolation into SQL is injection-prone --
        # prefer bind parameters if OraConnection supports them.
        if group.parent is None:
            grpSQL = SQLlist.objects.filter(sql_name='Group').first(
                ).get_checkin_sql() % (group.group_name, '', IfNoneThenNull
                (group.strg_period), IfNoneThenNull(group.strg_period_type,
                ''), group.deleted)
        else:
            grpSQL = SQLlist.objects.filter(sql_name='Group').first(
                ).get_checkin_sql() % (group.group_name, group.parent.
                group_name, IfNoneThenNull(group.strg_period),
                IfNoneThenNull(group.strg_period_type, ''), group.deleted)
        try:
            # NOTE(review): hard-coded Oracle credentials, and the connection
            # is never closed on this path -- consider try/finally.
            con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')
            con.exec(grpSQL)
            group.checkin(group.group_name)
        except NameError as err:
            print(err)
    except NameError as err:
        # Report the checkout-state violations raised above.
        if err.args[0] == 'OtherCheckoutUser':
            print('Взято на изменение пользователем %s' % group.checked_out_by)
        elif err.args[0] == 'NoneCheckoutUser':
            print('Сначала необходимо взять на изменение')
        else:
            print(err)
    # A group flagged deleted is also removed locally after check-in.
    if group.deleted == 1:
        group.delete()
        groups = Group.objects.all()
        return render(request, 'group_list.html', {'groups': groups})
    else:
        return redirect('group_detail', pk=pk)
def group_checkout(request, pk):
    """Mark the Group identified by *pk* as checked out by the current user.

    Checkout succeeds only when the record is free or already held by the
    same user; otherwise the current holder is reported via print()
    (matching the rest of this module) and nothing changes.
    """
    group = get_object_or_404(Group, pk=pk)
    # Held by somebody else: report and fall through without touching it.
    if (group.checked_out_by is not None and group.checked_out_by !=
            request.user.username):
        print('Взято на изменение пользователем %s' % group.checked_out_by)
    # BUG FIX: checked_out_by stores a username string (the comparison above
    # and those in group_checkin/group_del use request.user.username), so the
    # original `== request.user` (a User object) could never match, making
    # re-checkout by the current holder unreachable.
    if (group.checked_out_by is None or
            group.checked_out_by == request.user.username):
        group.checkout(group.group_name, request.user)
    return render(request, 'group_detail.html', {'group': group})
<|reserved_special_token_1|>
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Group,SQLlist
from .forms import GroupForm
from .oraConnect import *
from .utils import IfNoneThenNull
########################### Groups ############################
def group_list(request):
    """Render the list page showing every Group."""
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_detail(request, pk):
    """Render the detail page for the Group identified by *pk* (404 if absent)."""
    grp = get_object_or_404(Group, pk=pk)
    return render(request, 'group_detail.html', {'group': grp})
def group_add(request):
    """Create a new Group from a submitted GroupForm.

    GET (or an invalid POST) renders the edit form; a valid POST delegates
    persistence to Group.add() and redirects to the new group's detail page.
    """
    if request.method != 'POST':
        return render(request, 'group_edit.html', {'form': GroupForm()})
    form = GroupForm(request.POST)
    if form.is_valid():
        group = form.save(commit=False)
        group.add(group.group_name, request.user)
        return redirect('group_detail', pk=group.pk)
    # Invalid submission: re-render with the bound form so errors show.
    return render(request, 'group_edit.html', {'form': form})
def group_edit(request, pk):
    """Edit an existing Group via GroupForm.

    The record must be checked out by the current user; violations are
    reported via print() (matching the rest of this module) and redirect
    back to the detail page.
    """
    group = get_object_or_404(Group, pk=pk)
    try:
        # The record must be checked out, and by the current user.
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if group.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        # BUG FIX: removed a dead `grpSQL = ...get_checkin_sql() % ...`
        # assignment copy-pasted from group_checkin -- its result was never
        # used here and it only risked failing before the form was shown.
        if request.method == 'POST':
            form = GroupForm(request.POST, instance=group)
            if form.is_valid():
                group = form.save(commit=False)
                group.edit(group.group_name, request.user)
                return redirect('group_detail', pk=group.pk)
        else:
            form = GroupForm(instance=group)
        return render(request, 'group_edit.html', {'form': form})
    except NameError as err:
        if err.args[0] == 'OtherCheckoutUser':
            print('"%s" Взято на изменение пользователем %s'
                  % (group.group_name, group.checked_out_by))
        elif err.args[0] == 'NoneCheckoutUser':
            print('"%s" сначала необходимо взять на изменение' % (group.group_name))
        return redirect('group_detail', pk=group.pk)
def group_del(request, pk):
    """Mark the Group identified by *pk* as removed.

    The record must be checked out by the current user; otherwise the
    violation is reported via print() and nothing is changed.
    """
    grp = get_object_or_404(Group, pk=pk)
    try:
        if grp.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if grp.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        grp.remove(grp.group_name)
    except NameError as exc:
        code = exc.args[0]
        if code == 'OtherCheckoutUser':
            print('"%s" Взято на изменение пользователем %s'
                  % (grp.group_name, grp.checked_out_by))
        elif code == 'NoneCheckoutUser':
            print('"%s" сначала необходимо взять на изменение' % (grp.group_name))
    return render(request, 'group_detail.html', {'group': grp})
def group_import(request):
    """Import all groups from Oracle into the local Group table.

    Runs the stored 'Group' checkout SQL, creates one local Group per result
    row, and renders the refreshed list page.
    """
    # SECURITY NOTE(review): credentials are hard-coded; move them to
    # settings/environment configuration.
    con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')
    try:
        grpSQL = SQLlist.objects.filter(sql_name='Group').first(
            ).get_checkout_sql_all()
        grpCur = con.get_cursor(grpSQL)
        for rec in con.get_data(grpCur):
            # rec layout (from the stored SQL): group_name, parent name,
            # storage period, storage period type -- TODO confirm.
            Group.objects.create(
                group_name=rec[0],
                parent=Group.objects.filter(group_name=rec[1]).first(),
                strg_period=rec[2],
                strg_period_type=rec[3])
    finally:
        # BUG FIX: close the Oracle connection even when the import fails
        # part-way; previously an exception here leaked the connection.
        # (Also removed long-dead commented-out debug code.)
        con.close()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_truncate(request):
    """Delete every Group row, then render the (now empty) list page."""
    # Per-instance delete() is kept deliberately so any model-level delete
    # behavior still runs for each row (bulk queryset delete would skip it).
    for group in Group.objects.all():
        group.delete()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_checkin(request, pk):
    """Check a Group back in: push its current state to Oracle, release the
    checkout, and remove the local row when the group is flagged deleted.

    Only the user who checked the record out may check it in; violations
    are reported via print(), matching the rest of this module.
    """
    group = get_object_or_404(Group, pk=pk)
    try:
        # The record must be checked out, and by the current user.
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if group.checked_out_by is not None and group.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        # Build the check-in SQL from the stored 'Group' template; the parent
        # name becomes '' for a root group.
        # NOTE(review): plain %-interpolation into SQL is injection-prone --
        # prefer bind parameters if OraConnection supports them.
        if group.parent is None:
            grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkin_sql()%(group.group_name,'',IfNoneThenNull(group.strg_period),IfNoneThenNull(group.strg_period_type,''),group.deleted)
        else:
            grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkin_sql()%(group.group_name,group.parent.group_name,IfNoneThenNull(group.strg_period),IfNoneThenNull(group.strg_period_type,''),group.deleted)
        try:
            # NOTE(review): hard-coded Oracle credentials, and the connection
            # is never closed on this path -- consider try/finally.
            con = OraConnection('DM_SKB','DWHLegator000000','XE')
            con.exec(grpSQL);
            group.checkin(group.group_name)
        except NameError as err:
            print(err)
    except NameError as err:
        # Report the checkout-state violations raised above.
        if err.args[0] == 'OtherCheckoutUser':
            print('Взято на изменение пользователем %s'%(group.checked_out_by))
        elif err.args[0] == 'NoneCheckoutUser':
            print('Сначала необходимо взять на изменение')
        else:
            print(err)
    # A group flagged deleted is also removed locally after check-in.
    if group.deleted == 1:
        group.delete()
        groups = Group.objects.all()
        return render(request, 'group_list.html', {'groups': groups})
    else:
        return redirect('group_detail', pk=pk)
def group_checkout(request, pk):
    """Mark the Group identified by *pk* as checked out by the current user.

    Checkout succeeds only when the record is free or already held by the
    same user; otherwise the current holder is reported via print()
    (matching the rest of this module) and nothing changes.
    """
    group = get_object_or_404(Group, pk=pk)
    # Held by somebody else: report and fall through without touching it.
    if (group.checked_out_by is not None and group.checked_out_by !=
            request.user.username):
        print('Взято на изменение пользователем %s' % group.checked_out_by)
    # BUG FIX: checked_out_by stores a username string (the comparison above
    # and those in group_checkin/group_del use request.user.username), so the
    # original `== request.user` (a User object) could never match, making
    # re-checkout by the current holder unreachable.
    if (group.checked_out_by is None or
            group.checked_out_by == request.user.username):
        group.checkout(group.group_name, request.user)
    return render(request, 'group_detail.html', {'group': group})
########################### End of Groups section ############################
|
flexible
|
{
"blob_id": "b9fe758d5fe12b5a15097c0e5a33cb2d57edfdd2",
"index": 7484,
"step-1": "<mask token>\n\n\ndef group_list(request):\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\n<mask token>\n\n\ndef group_add(request):\n if request.method == 'POST':\n form = GroupForm(request.POST)\n if form.is_valid():\n group = form.save(commit=False)\n group.add(group.group_name, request.user)\n return redirect('group_detail', pk=group.pk)\n else:\n form = GroupForm()\n return render(request, 'group_edit.html', {'form': form})\n\n\n<mask token>\n\n\ndef group_truncate(request):\n for rec in Group.objects.all():\n rec.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\n<mask token>\n\n\ndef group_checkout(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n except NameError:\n print('Взято на изменение пользователем %s' % group.checked_out_by)\n if group.checked_out_by is None or group.checked_out_by == request.user:\n group.checkout(group.group_name, request.user)\n return render(request, 'group_detail.html', {'group': group})\n",
"step-2": "<mask token>\n\n\ndef group_list(request):\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\n<mask token>\n\n\ndef group_add(request):\n if request.method == 'POST':\n form = GroupForm(request.POST)\n if form.is_valid():\n group = form.save(commit=False)\n group.add(group.group_name, request.user)\n return redirect('group_detail', pk=group.pk)\n else:\n form = GroupForm()\n return render(request, 'group_edit.html', {'form': form})\n\n\n<mask token>\n\n\ndef group_import(request):\n con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkout_sql_all()\n grpCur = con.get_cursor(grpSQL)\n for rec in con.get_data(grpCur):\n Group.objects.create(group_name=rec[0], parent=Group.objects.filter\n (group_name=rec[1]).first(), strg_period=rec[2],\n strg_period_type=rec[3])\n con.close()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\ndef group_truncate(request):\n for rec in Group.objects.all():\n rec.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\ndef group_checkin(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise NameError('NoneCheckoutUser')\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n if group.parent is None:\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkin_sql() % (group.group_name, '', IfNoneThenNull\n (group.strg_period), IfNoneThenNull(group.strg_period_type,\n ''), group.deleted)\n else:\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkin_sql() % (group.group_name, group.parent.\n group_name, IfNoneThenNull(group.strg_period),\n IfNoneThenNull(group.strg_period_type, ''), group.deleted)\n try:\n con = OraConnection('DM_SKB', 
'DWHLegator000000', 'XE')\n con.exec(grpSQL)\n group.checkin(group.group_name)\n except NameError as err:\n print(err)\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('Взято на изменение пользователем %s' % group.checked_out_by)\n elif err.args[0] == 'NoneCheckoutUser':\n print('Сначала необходимо взять на изменение')\n else:\n print(err)\n if group.deleted == 1:\n group.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n else:\n return redirect('group_detail', pk=pk)\n\n\ndef group_checkout(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n except NameError:\n print('Взято на изменение пользователем %s' % group.checked_out_by)\n if group.checked_out_by is None or group.checked_out_by == request.user:\n group.checkout(group.group_name, request.user)\n return render(request, 'group_detail.html', {'group': group})\n",
"step-3": "<mask token>\n\n\ndef group_list(request):\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\ndef group_detail(request, pk):\n group = get_object_or_404(Group, pk=pk)\n return render(request, 'group_detail.html', {'group': group})\n\n\ndef group_add(request):\n if request.method == 'POST':\n form = GroupForm(request.POST)\n if form.is_valid():\n group = form.save(commit=False)\n group.add(group.group_name, request.user)\n return redirect('group_detail', pk=group.pk)\n else:\n form = GroupForm()\n return render(request, 'group_edit.html', {'form': form})\n\n\n<mask token>\n\n\ndef group_del(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise NameError('NoneCheckoutUser')\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n group.remove(group.group_name)\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('\"%s\" Взято на изменение пользователем %s' % (group.\n group_name, group.checked_out_by))\n elif err.args[0] == 'NoneCheckoutUser':\n print('\"%s\" сначала необходимо взять на изменение' % group.\n group_name)\n return render(request, 'group_detail.html', {'group': group})\n\n\ndef group_import(request):\n con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkout_sql_all()\n grpCur = con.get_cursor(grpSQL)\n for rec in con.get_data(grpCur):\n Group.objects.create(group_name=rec[0], parent=Group.objects.filter\n (group_name=rec[1]).first(), strg_period=rec[2],\n strg_period_type=rec[3])\n con.close()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\ndef group_truncate(request):\n for rec in Group.objects.all():\n rec.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\ndef 
group_checkin(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise NameError('NoneCheckoutUser')\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n if group.parent is None:\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkin_sql() % (group.group_name, '', IfNoneThenNull\n (group.strg_period), IfNoneThenNull(group.strg_period_type,\n ''), group.deleted)\n else:\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkin_sql() % (group.group_name, group.parent.\n group_name, IfNoneThenNull(group.strg_period),\n IfNoneThenNull(group.strg_period_type, ''), group.deleted)\n try:\n con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')\n con.exec(grpSQL)\n group.checkin(group.group_name)\n except NameError as err:\n print(err)\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('Взято на изменение пользователем %s' % group.checked_out_by)\n elif err.args[0] == 'NoneCheckoutUser':\n print('Сначала необходимо взять на изменение')\n else:\n print(err)\n if group.deleted == 1:\n group.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n else:\n return redirect('group_detail', pk=pk)\n\n\ndef group_checkout(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n except NameError:\n print('Взято на изменение пользователем %s' % group.checked_out_by)\n if group.checked_out_by is None or group.checked_out_by == request.user:\n group.checkout(group.group_name, request.user)\n return render(request, 'group_detail.html', {'group': group})\n",
"step-4": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom .models import Group, SQLlist\nfrom .forms import GroupForm\nfrom .oraConnect import *\nfrom .utils import IfNoneThenNull\n\n\ndef group_list(request):\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\ndef group_detail(request, pk):\n group = get_object_or_404(Group, pk=pk)\n return render(request, 'group_detail.html', {'group': group})\n\n\ndef group_add(request):\n if request.method == 'POST':\n form = GroupForm(request.POST)\n if form.is_valid():\n group = form.save(commit=False)\n group.add(group.group_name, request.user)\n return redirect('group_detail', pk=group.pk)\n else:\n form = GroupForm()\n return render(request, 'group_edit.html', {'form': form})\n\n\ndef group_edit(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise NameError('NoneCheckoutUser')\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n if group.parent is None:\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkin_sql() % (group.group_name, '', IfNoneThenNull\n (group.strg_period), IfNoneThenNull(group.strg_period_type,\n ''), group.deleted)\n if request.method == 'POST':\n form = GroupForm(request.POST, instance=group)\n if form.is_valid():\n group = form.save(commit=False)\n group.edit(group.group_name, request.user)\n return redirect('group_detail', pk=group.pk)\n else:\n form = GroupForm(instance=group)\n return render(request, 'group_edit.html', {'form': form})\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('\"%s\" Взято на изменение пользователем %s' % (group.\n group_name, group.checked_out_by))\n elif err.args[0] == 'NoneCheckoutUser':\n print('\"%s\" сначала необходимо взять на изменение' % group.\n group_name)\n return 
redirect('group_detail', pk=group.pk)\n\n\ndef group_del(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise NameError('NoneCheckoutUser')\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n group.remove(group.group_name)\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('\"%s\" Взято на изменение пользователем %s' % (group.\n group_name, group.checked_out_by))\n elif err.args[0] == 'NoneCheckoutUser':\n print('\"%s\" сначала необходимо взять на изменение' % group.\n group_name)\n return render(request, 'group_detail.html', {'group': group})\n\n\ndef group_import(request):\n con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkout_sql_all()\n grpCur = con.get_cursor(grpSQL)\n for rec in con.get_data(grpCur):\n Group.objects.create(group_name=rec[0], parent=Group.objects.filter\n (group_name=rec[1]).first(), strg_period=rec[2],\n strg_period_type=rec[3])\n con.close()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\ndef group_truncate(request):\n for rec in Group.objects.all():\n rec.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\n\ndef group_checkin(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise NameError('NoneCheckoutUser')\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n if group.parent is None:\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n ).get_checkin_sql() % (group.group_name, '', IfNoneThenNull\n (group.strg_period), IfNoneThenNull(group.strg_period_type,\n ''), group.deleted)\n else:\n grpSQL = SQLlist.objects.filter(sql_name='Group').first(\n 
).get_checkin_sql() % (group.group_name, group.parent.\n group_name, IfNoneThenNull(group.strg_period),\n IfNoneThenNull(group.strg_period_type, ''), group.deleted)\n try:\n con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')\n con.exec(grpSQL)\n group.checkin(group.group_name)\n except NameError as err:\n print(err)\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('Взято на изменение пользователем %s' % group.checked_out_by)\n elif err.args[0] == 'NoneCheckoutUser':\n print('Сначала необходимо взять на изменение')\n else:\n print(err)\n if group.deleted == 1:\n group.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n else:\n return redirect('group_detail', pk=pk)\n\n\ndef group_checkout(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if (group.checked_out_by is not None and group.checked_out_by !=\n request.user.username):\n raise NameError('OtherCheckoutUser')\n except NameError:\n print('Взято на изменение пользователем %s' % group.checked_out_by)\n if group.checked_out_by is None or group.checked_out_by == request.user:\n group.checkout(group.group_name, request.user)\n return render(request, 'group_detail.html', {'group': group})\n",
"step-5": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom .models import Group,SQLlist\nfrom .forms import GroupForm\nfrom .oraConnect import *\nfrom .utils import IfNoneThenNull\n\n########################### Группы ############################\ndef group_list(request):\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n \ndef group_detail(request, pk):\n group = get_object_or_404(Group, pk=pk)\n return render(request, 'group_detail.html', {'group': group})\n\ndef group_add(request):\n if request.method == \"POST\":\n form = GroupForm(request.POST)\n if form.is_valid():\n group = form.save(commit=False)\n group.add(group.group_name,request.user)\n return redirect('group_detail', pk=group.pk)\n else:\n form = GroupForm()\n return render(request, 'group_edit.html', {'form': form})\n \ndef group_edit(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise NameError('NoneCheckoutUser')\n if group.checked_out_by is not None and group.checked_out_by != request.user.username:\n raise NameError('OtherCheckoutUser')\n if group.parent is None:\n grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkin_sql()%(group.group_name,'',IfNoneThenNull(group.strg_period),IfNoneThenNull(group.strg_period_type,''),group.deleted)\n if request.method == \"POST\":\n form = GroupForm(request.POST, instance=group)\n if form.is_valid():\n group = form.save(commit=False)\n group.edit(group.group_name,request.user)\n return redirect('group_detail', pk=group.pk)\n else:\n form = GroupForm(instance=group)\n return render(request, 'group_edit.html', {'form': form})\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('\"%s\" Взято на изменение пользователем %s'%(group.group_name,group.checked_out_by))\n elif err.args[0] == 'NoneCheckoutUser':\n print('\"%s\" сначала необходимо взять на 
изменение'%(group.group_name))\n return redirect('group_detail', pk=group.pk)\n\ndef group_del(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise NameError('NoneCheckoutUser')\n if group.checked_out_by is not None and group.checked_out_by != request.user.username:\n raise NameError('OtherCheckoutUser')\n group.remove(group.group_name)\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('\"%s\" Взято на изменение пользователем %s'%(group.group_name,group.checked_out_by))\n elif err.args[0] == 'NoneCheckoutUser':\n print('\"%s\" сначала необходимо взять на изменение'%(group.group_name))\n return render(request, 'group_detail.html', {'group': group})\n\ndef group_import(request):\n con = OraConnection('DM_SKB','DWHLegator000000','XE')\n grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkout_sql_all()\n\n grpCur = con.get_cursor(grpSQL)\n\n # Не забываем, что есть dir(), с чьей помощью можно узнать очень\n # много полезного об инстансе курсорной переменной\n #print('grpCur: ', dir(grpCur))\n #print('grpCur.getvalue(): ', dir(grpCur.getvalue()))\n\n # описание полей запроса\n #for col in con.get_fields(entCur):\n # print(col)\n\n for rec in con.get_data(grpCur):\n #try:\n # Group.objects.filter(group_name = rec[0]).first().delete()\n #except:\n # None\n Group.objects.create(group_name = rec[0],parent = Group.objects.filter(group_name = rec[1]).first(),strg_period = rec[2],strg_period_type = rec[3])\n # не забываем закрывать за собой соединение с Ораклом\n con.close()\n groups = Group.objects.all()\n return render(request,'group_list.html',{'groups': groups})\n\ndef group_truncate(request):\n for rec in Group.objects.all():\n rec.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n\ndef group_checkin(request, pk):\n group = get_object_or_404(Group, pk=pk)\n try:\n if group.checked_out_by is None:\n raise 
NameError('NoneCheckoutUser')\n if group.checked_out_by is not None and group.checked_out_by != request.user.username:\n raise NameError('OtherCheckoutUser')\n if group.parent is None:\n grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkin_sql()%(group.group_name,'',IfNoneThenNull(group.strg_period),IfNoneThenNull(group.strg_period_type,''),group.deleted)\n else:\n grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkin_sql()%(group.group_name,group.parent.group_name,IfNoneThenNull(group.strg_period),IfNoneThenNull(group.strg_period_type,''),group.deleted)\n try:\n con = OraConnection('DM_SKB','DWHLegator000000','XE')\n con.exec(grpSQL);\n group.checkin(group.group_name)\n except NameError as err:\n print(err)\n except NameError as err:\n if err.args[0] == 'OtherCheckoutUser':\n print('Взято на изменение пользователем %s'%(group.checked_out_by))\n elif err.args[0] == 'NoneCheckoutUser':\n print('Сначала необходимо взять на изменение')\n else:\n print(err)\n \n if group.deleted == 1:\n group.delete()\n groups = Group.objects.all()\n return render(request, 'group_list.html', {'groups': groups})\n else: \n return redirect('group_detail', pk=pk)\n\ndef group_checkout(request, pk):\n group = get_object_or_404(Group, pk=pk)\n #Если взято на изменение другим пользователем\n try:\n if group.checked_out_by is not None and group.checked_out_by != request.user.username:\n raise NameError('OtherCheckoutUser')\n except NameError:\n print('Взято на изменение пользователем %s'%(group.checked_out_by))\n if group.checked_out_by is None or group.checked_out_by == request.user:\n group.checkout(group.group_name,request.user)\n return render(request, 'group_detail.html', {'group': group})\n########################### Окончание Группы ############################\n",
"step-ids": [
4,
6,
8,
10,
11
]
}
|
[
4,
6,
8,
10,
11
] |
class Queue(object):
def __init__(self, val_list=None):
self.stack_one = []
self.stack_two = []
if val_list:
for item in val_list:
self.stack_one.append(item)
def push(self, val=None):
if val:
self.stack_one.append(val)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Queue(object):
def __init__(self, val_list=None):
self.stack_one = []
self.stack_two = []
if val_list:
for item in val_list:
self.stack_one.append(item)
def push(self, val=None):
if val:
self.stack_one.append(val)
def pop(self):
for index in range(0, len(self.stack_one)):
self.stack_two.append(self.stack_one.pop())
self.stack_two.pop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Queue(object):
def __init__(self, val_list=None):
self.stack_one = []
self.stack_two = []
if val_list:
for item in val_list:
self.stack_one.append(item)
def push(self, val=None):
if val:
self.stack_one.append(val)
def pop(self):
for index in range(0, len(self.stack_one)):
self.stack_two.append(self.stack_one.pop())
self.stack_two.pop()
def main():
a = Queue()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Queue(object):
    """FIFO queue built from two LIFO stacks (amortized O(1) per operation).

    ``stack_one`` receives pushes; ``stack_two`` holds items in pop order.
    """

    def __init__(self, val_list=None):
        """Create a queue, optionally pre-loaded with the items of val_list
        (front of the queue = first element of val_list)."""
        self.stack_one = []
        self.stack_two = []
        if val_list:
            for item in val_list:
                self.stack_one.append(item)

    def push(self, val=None):
        """Enqueue val.

        Fix: the original tested ``if val:`` and silently dropped falsy
        values such as 0 or "".
        """
        if val is not None:
            self.stack_one.append(val)

    def pop(self):
        """Dequeue and return the oldest item.

        Fixes two defects in the original:
        * items are transferred to stack_two only when stack_two is empty,
          so FIFO order survives interleaved push/pop calls (the original
          appended new pushes behind already-reversed items);
        * the removed item is returned instead of being discarded.

        Raises:
            IndexError: if the queue is empty.
        """
        if not self.stack_two:
            while self.stack_one:
                self.stack_two.append(self.stack_one.pop())
        return self.stack_two.pop()


def main():
    # Smoke entry point: just constructs an empty queue.
    a = Queue()


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "d4d8d800b81a50f2c520f0394412935738d1a8ee",
"index": 2986,
"step-1": "class Queue(object):\n\n def __init__(self, val_list=None):\n self.stack_one = []\n self.stack_two = []\n if val_list:\n for item in val_list:\n self.stack_one.append(item)\n\n def push(self, val=None):\n if val:\n self.stack_one.append(val)\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Queue(object):\n\n def __init__(self, val_list=None):\n self.stack_one = []\n self.stack_two = []\n if val_list:\n for item in val_list:\n self.stack_one.append(item)\n\n def push(self, val=None):\n if val:\n self.stack_one.append(val)\n\n def pop(self):\n for index in range(0, len(self.stack_one)):\n self.stack_two.append(self.stack_one.pop())\n self.stack_two.pop()\n\n\n<mask token>\n",
"step-3": "class Queue(object):\n\n def __init__(self, val_list=None):\n self.stack_one = []\n self.stack_two = []\n if val_list:\n for item in val_list:\n self.stack_one.append(item)\n\n def push(self, val=None):\n if val:\n self.stack_one.append(val)\n\n def pop(self):\n for index in range(0, len(self.stack_one)):\n self.stack_two.append(self.stack_one.pop())\n self.stack_two.pop()\n\n\ndef main():\n a = Queue()\n\n\n<mask token>\n",
"step-4": "class Queue(object):\n\n def __init__(self, val_list=None):\n self.stack_one = []\n self.stack_two = []\n if val_list:\n for item in val_list:\n self.stack_one.append(item)\n\n def push(self, val=None):\n if val:\n self.stack_one.append(val)\n\n def pop(self):\n for index in range(0, len(self.stack_one)):\n self.stack_two.append(self.stack_one.pop())\n self.stack_two.pop()\n\n\ndef main():\n a = Queue()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(dir(grok))
grok.testA()
<|reserved_special_token_1|>
from mypackage.A import grok
print(dir(grok))
grok.testA()
<|reserved_special_token_1|>
# PROBLEM: Code is organized in a package and we want to import a submodule
# from one of the other package submodules without hardcoding the package name
# into the import statement.
# SOLUTION: Use a package-relative import (``from .A import grok`` inside the
# package); the absolute form is shown below for contrast.
from mypackage.A import grok
print(dir(grok))
grok.testA()
|
flexible
|
{
"blob_id": "ad9facb9c8e552845df9171549f886f3e9cba193",
"index": 7544,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dir(grok))\ngrok.testA()\n",
"step-3": "from mypackage.A import grok\nprint(dir(grok))\ngrok.testA()\n",
"step-4": "# PROBLEM: Code organized in package and want to import a submodule from one o the other pkg\n# submodules without hardcoding the package name into the import statement\n# SOLUTION: Use pkg-relative import\n\n# Absolete path\nfrom mypackage.A import grok\n\nprint(dir(grok))\ngrok.testA()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def longest_word(s, d):
lengths = [(entry, len(entry)) for entry in d]
sorted_d = sorted(lengths, key=lambda x: (-x[1], x[0]))
for word, length in sorted_d:
j = 0
for i in range(0, len(s)):
if j < len(word) and word[j] == s[i]:
j += 1
if j == len(word):
return word
return ''
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def longest_word(s, d):
lengths = [(entry, len(entry)) for entry in d]
sorted_d = sorted(lengths, key=lambda x: (-x[1], x[0]))
for word, length in sorted_d:
j = 0
for i in range(0, len(s)):
if j < len(word) and word[j] == s[i]:
j += 1
if j == len(word):
return word
return ''
print(longest_word('abpcplea', ['a', 'b', 'c']))
print(longest_word('abpcplea', ['ba', 'ab', 'a', 'b']))
print(longest_word('abpcplea', ['ale', 'apple', 'monkey', 'plea']))
<|reserved_special_token_1|>
def longest_word(s, d):
    """Return the longest word in d that is a subsequence of s.

    Ties on length are broken alphabetically; returns '' when no word of
    d can be formed by deleting characters from s.
    """
    # Longest first, then lexicographic — the first match wins outright.
    candidates = sorted(d, key=lambda w: (-len(w), w))
    for word in candidates:
        matched = 0
        for ch in s:
            if matched < len(word) and word[matched] == ch:
                matched += 1
        if matched == len(word):
            return word
    return ''
print(longest_word("abpcplea", ["a", "b", "c"]))
print(longest_word("abpcplea", ["ba", "ab", "a", "b"]))
print(longest_word('abpcplea', ["ale","apple","monkey","plea"]))
|
flexible
|
{
"blob_id": "86de5b4a72978e2c49e060eefc513e3ed61272ae",
"index": 4004,
"step-1": "<mask token>\n",
"step-2": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key=lambda x: (-x[1], x[0]))\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\n\n<mask token>\n",
"step-3": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key=lambda x: (-x[1], x[0]))\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\n\nprint(longest_word('abpcplea', ['a', 'b', 'c']))\nprint(longest_word('abpcplea', ['ba', 'ab', 'a', 'b']))\nprint(longest_word('abpcplea', ['ale', 'apple', 'monkey', 'plea']))\n",
"step-4": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key = lambda x: (-x[1], x[0]))\n\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\nprint(longest_word(\"abpcplea\", [\"a\", \"b\", \"c\"]))\nprint(longest_word(\"abpcplea\", [\"ba\", \"ab\", \"a\", \"b\"]))\nprint(longest_word('abpcplea', [\"ale\",\"apple\",\"monkey\",\"plea\"]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
''' Model package should contain all data types for the database engine,
which means that projects like PyCIM can be included within '''
|
flexible
|
{
"blob_id": "ce3c1a7210632d0a8475fe886d514eb91d3c75ac",
"index": 7700,
"step-1": "<mask token>\n",
"step-2": "''' Model package should containt all data types for the database engine, \nwhich means that projects like PyCIM can be included within '''",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# Read an integer upper bound from the user, then print 1..n inclusive,
# one number per line.  ("nhap gia tri" is Vietnamese for "enter a value".)
n =int(input("nhap gia tri"))
for i in range(1,n+1):
    print(i)
|
normal
|
{
"blob_id": "21b295e28a7e4443ea116df1b22ff5074dca955a",
"index": 246,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1):\n print(i)\n",
"step-3": "n = int(input('nhap gia tri'))\nfor i in range(1, n + 1):\n print(i)\n",
"step-4": "n =int(input(\"nhap gia tri\"))\nfor i in range(1,n+1):\n\n\n\n\n\n\n\n\n\n\n\n\n print(i)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('lkft', '0021_reportjob_finished_successfully')]
operations = [migrations.AddField(model_name='cibuild', name=
'changes_num', field=models.IntegerField(default=0)), migrations.
AddField(model_name='cibuild', name='display_name', field=models.
CharField(max_length=255, null=True))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('lkft', '0021_reportjob_finished_successfully')]
operations = [migrations.AddField(model_name='cibuild', name=
'changes_num', field=models.IntegerField(default=0)), migrations.
AddField(model_name='cibuild', name='display_name', field=models.
CharField(max_length=255, null=True))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-04-09 06:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema migration: adds two columns to the ``cibuild`` table.  Both carry
    # a default / allow NULL, so the migration applies to existing rows
    # without any backfill.

    dependencies = [('lkft', '0021_reportjob_finished_successfully')]

    operations = [
        migrations.AddField(model_name='cibuild', name='changes_num',
                            field=models.IntegerField(default=0)),
        migrations.AddField(model_name='cibuild', name='display_name',
                            field=models.CharField(max_length=255, null=True)),
    ]
|
flexible
|
{
"blob_id": "787397473c431d2560bf8c488af58e976c1864d0",
"index": 6730,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lkft', '0021_reportjob_finished_successfully')]\n operations = [migrations.AddField(model_name='cibuild', name=\n 'changes_num', field=models.IntegerField(default=0)), migrations.\n AddField(model_name='cibuild', name='display_name', field=models.\n CharField(max_length=255, null=True))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lkft', '0021_reportjob_finished_successfully')]\n operations = [migrations.AddField(model_name='cibuild', name=\n 'changes_num', field=models.IntegerField(default=0)), migrations.\n AddField(model_name='cibuild', name='display_name', field=models.\n CharField(max_length=255, null=True))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.17 on 2021-04-09 06:08\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lkft', '0021_reportjob_finished_successfully'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='cibuild',\n name='changes_num',\n field=models.IntegerField(default=0),\n ),\n migrations.AddField(\n model_name='cibuild',\n name='display_name',\n field=models.CharField(max_length=255, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#
#
#
##
from __future__ import print_function, unicode_literals
import inspect
import os
import pprint as pp
import time
from time import gmtime, strftime
import subprocess
from local import *
from slurm import *
class Job_status( object ):
    """ Enumerate class for job statuses, this is done differently in python 3
    """
    # Terminal outcomes.
    FINISHED = 1
    FAILED = 2       # NOTE(review): looks retryable (see Manager.resubmit_job) -- confirm
    NO_RESTART = 3   # NOTE(review): appears to mean "failed, no retries left" -- confirm
    # Live states while a job is with a backend.
    RUNNING = 4
    QUEUEING = 5
    RESUBMITTED = 6
    SUBMITTED = 7
    # Bookkeeping states.
    CREATED = 98     # Job object built but not yet handed to a backend (set in Job.__init__)
    KILLED = 99
    UNKNOWN = 100
class Job(object):
    """ This class is presenting a singular job and all information associated with it.
    """

    def __init__(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None):
        """ Create a job object

        Args:
            cmd (str): command to run
            step_name (str): name of the step that this command belongs to
            output (str): output information to pass on to the next job
            limit (str): parameters to pass on to the backend
            delete_file (str or list): file(s) to delete if the job is successful
            thread_id (int): id of the thread running this

        Returns:
            job (obj)
        """
        self.status = Job_status.CREATED
        self.active = True
        # NOTE(review): never assigned anywhere in this file; kept for
        # interface compatibility with external readers.
        self.command = None
        self.backend = None   # attached by the manager/backend on submit

        self.cmd = cmd
        # Fix: the original set step_name to None first and then overwrote it.
        self.step_name = step_name
        self.output = output
        self.pre_task_ids = None
        self.job_id = None
        self.backend_id = None
        self.nr_of_tries = 0
        self.max_memory = None
        self.cputime = None

        # Fix: always define these attributes, so later reads cannot raise
        # AttributeError when the optional arguments were omitted (the
        # original only assigned them when the argument was not None).
        self.limit = limit
        self.delete_file = delete_file
        self.thread_id = thread_id

    def __getitem__(self, item):
        """ Generic getter function

        Raises:
            AttributeError is raised if trying to access value starting with _ or unknown value
        """
        if item.startswith("_"):
            raise AttributeError(item)
        # getattr itself raises AttributeError for unknown names; the
        # original try/except KeyError around it was dead code.
        return getattr(self, item)

    def __setitem__(self, item, value):
        """ Generic setter function

        Raises:
            AttributeError is raised if trying to set a value starting with _
        """
        if item.startswith("_"):
            raise AttributeError(item)
        setattr(self, item, value)

    def __repr__(self):
        return "{name} -> {status}".format( name=self.step_name, status=self.status )

    def __str__(self):
        return "{name}".format( name=self.step_name )

    def delete_tmp_files(self):
        """ Deletes the job's temporary file(s), if any.

        Args:
            None

        Returns:
            bool: True (also when there was nothing to delete)

        Raises:
            None
        """
        if self.delete_file is None:
            return True
        if isinstance(self.delete_file, str):
            self.delete_file = [self.delete_file]
        for file_name in self.delete_file:
            # Fix: removed the stray debug print of every file name.
            if os.path.isfile(file_name):
                os.remove(file_name)
        return True
class Thread( object):
    """A named thread of execution within the pipeline, identified by an id."""

    def __init__( self, name, thread_id ):
        self.name = name
        self.thread_id = thread_id

    def __getitem__(self, item):
        """ Generic getter function

        Raises:
            AttributeError is raised if trying to access value starting with _ or unknown value
        """
        if item.startswith("_"):
            raise AttributeError(item)
        # Fix: getattr raises AttributeError itself for unknown names; the
        # original's try/except KeyError re-raise was unreachable.
        return getattr(self, item)

    def __setitem__(self, item, value):
        """ Generic setter function

        Raises:
            AttributeError is raised if trying to set a value starting with _
        """
        if item.startswith("_"):
            raise AttributeError(item)
        setattr(self, item, value)
class Manager( object ):
    """Bookkeeping hub for a pipeline run: owns every Job, hands jobs to the
    configured backend, tracks named threads and prints progress reports."""

    def __init__(self, pipeline):
        """ Creates a manager object

        Args:
            pipeline (obj): pipeline instance this manager reports on
        """
        self._jobs = []
        self._active_jobs = []
        self._threads = []
        self._thread_index = {}
        self._thread_id = 1
        self.local_backend = Local()
        self.backend = None
        self.pipeline = pipeline
        # Fix: _next_id() increments self.job_id but the attribute was never
        # initialised anywhere; start the counter here.
        self.job_id = 0

    def __getitem__(self, item):
        """ Generic getter function

        Raises:
            AttributeError is raised if trying to access value starting with _ or unknown value
        """
        if item.startswith("_"):
            raise AttributeError(item)
        # getattr already raises AttributeError for unknown names; the
        # original try/except KeyError wrapper was unreachable.
        return getattr(self, item)

    def __setitem__(self, item, value):
        """ Generic setter function

        Raises:
            AttributeError is raised if trying to set a value starting with _
        """
        if item.startswith("_"):
            raise AttributeError(item)
        setattr(self, item, value)

    def add_thread(self, name):
        """ Create a new thread object for the manager

        Args:
            name (str): name of the thread

        Returns:
            None
        """
        thread = Thread(name=name, thread_id=self._thread_id)
        self._threads.append(thread)
        # Fix: the index must map to the *list position* of the thread, not
        # its 1-based thread_id -- the original made get_thread_by_name()
        # raise IndexError for the most recently added thread.
        self._thread_index[name] = len(self._threads) - 1
        self._thread_id += 1

    def get_thread_by_name(self, name):
        """ gets a thread object based on name

        Args:
            name (str): name of the thread

        Returns:
            thread (obj)

        Raises:
            raises an assert error if the thread does not exist
        """
        assert name in self._thread_index, "No thread named {}".format( name )
        return self._threads[self._thread_index[name]]

    def submit_job(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None, system_call=False):
        """ Submits a job using the selected backend, setting up the tracking and all that jazz

        Args:
            cmd (str): command to run
            step_name (str): name of the step that this command belongs to
            output (str): output information to pass on to the next job
            limit (str): parameters to pass on to the backend
            delete_file (str): File(s) to delete if the job is successful
            thread_id (int): id of the thread running this
            system_call (bool): run the job as a system job (default: false )

        Returns:
            None
        """
        job = Job(cmd, step_name, output, limit, delete_file, thread_id)
        self._jobs.append(job)
        job.job_id = len(self._jobs) - 1
        if system_call:
            job = self.local_backend.system_call(job)
        else:
            job = self.backend.submit(job)

    def resubmit_job(self, job):
        """ resubmits a job

        Args:
            job (job): job to resubmit
        """
        job.nr_of_tries += 1
        job.status = Job_status.RESUBMITTED
        job = self.backend.submit(job)

    def killall(self):
        """kills all submitted/running jobs

        Fix: the original iterated a non-existent ``self.jobs`` (unpacking it
        into two variables) and called an undefined global ``backend``; use
        the job list and each job's own backend, as active_jobs() does.
        """
        for job in self._jobs:
            job.backend.kill(job)

    def job_outputs(self, step_name=None):
        """ Collect the outputs of all jobs belonging to the steps directly
        upstream of step_name.

        Args:
            step_name (str): name of the step to collect outputs for

        Returns:
            list of outputs
        """
        outputs = []
        prev_steps = self.pipeline._workflow.prev_steps(step_name)
        for job in self._jobs:
            if job.step_name in prev_steps:
                outputs.append(job.output)
        return outputs

    def format_memory(self, memory):
        """ Format memory into a more readable format

        Args:
            memory (int/float): memory in bytes, or None

        Returns:
            Readable memory (str), "N/A" for None or 0
        """
        # Fix: test for None *before* the float() cast -- the original cast
        # first, so format_memory(None) raised a TypeError instead of
        # returning "N/A".
        if memory is None:
            return "N/A"
        memory = float(memory)
        if memory == 0:
            return "N/A"
        elif memory > 1000000000:
            return "{:.2f}GB".format(memory / 1000000000)
        elif memory > 1000000:
            return "{:.2f}MB".format(memory / 1000000)
        elif memory > 1000:
            return "{:.2f}KB".format(memory / 1000)
        else:
            return "{:}".format(int(memory))

    def format_time(self, seconds):
        """ Formats seconds into a more readable format eg: 10:03:01

        Args:
            seconds (int): seconds to convert into hours:mins:seconds

        Returns:
            time (str), "N/A" for None
        """
        if seconds is None:
            return "N/A"
        seconds = int(seconds)
        hours = int(seconds / 3600)
        seconds -= hours * 3600
        minutes = int(seconds / 60)
        seconds -= minutes * 60
        seconds = int(seconds)
        return "{:02}:{:02}:{:02}".format( hours, minutes, seconds)

    def report(self):
        """ print the current progress

        Args:
            None

        Returns:
            None
        """
        # Aggregate per-step counters keyed on the step name.
        job_summary = {}
        for job in self._jobs:
            if job.step_name not in job_summary:
                job_summary[job.step_name] = {'DONE': 0, 'RUNNING': 0,
                                              'QUEUING': 0, 'FAILED': 0,
                                              'UNKNOWN': 0, 'max_mem': 0,
                                              'cputime': 0}
            if job.status == Job_status.FINISHED:
                job_summary[job.step_name]['DONE'] += 1
                if job.cputime is not None:
                    job_summary[job.step_name]['cputime'] += int(job.cputime)
                if job.max_memory is not None and job.max_memory > job_summary[job.step_name]['max_mem']:
                    job_summary[job.step_name]['max_mem'] = int(job.max_memory)
            elif job.status == Job_status.RUNNING:
                job_summary[job.step_name]['RUNNING'] += 1
            elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:
                job_summary[job.step_name]['QUEUING'] += 1
            elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:
                job_summary[job.step_name]['FAILED'] += 1
            else:
                job_summary[job.step_name]['UNKNOWN'] += 1

        local_time = strftime("%d/%m/%Y %H:%M", time.localtime())
        pickle_file = "{}.{}".format(self.pipeline.project_name, self.pipeline._pid)
        print("[{} @{} {}]".format( local_time,self.pipeline._hostname , pickle_file))
        print("{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}".format("Run stats", "Runtime", "Max Mem", "D","R","Q","F","U"))
        # Print the steps in workflow order, skipping steps with no jobs yet.
        for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):
            if step not in job_summary:
                continue
            print("{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(step,
                self.format_time(job_summary[step]['cputime']),
                self.format_memory(job_summary[step]['max_mem']),
                job_summary[step]['DONE'],
                job_summary[step]['RUNNING'],
                job_summary[step]['QUEUING'],
                job_summary[step]['FAILED'],
                job_summary[step]['UNKNOWN']))

    def active_jobs(self):
        """ updates the status of and returns all active jobs

        Args:
            None

        Returns:
            list of jobs (obj)
        """
        active_jobs = []
        for job in self._jobs:
            if job.active:
                job.backend.status(job)
                active_jobs.append(job)
        self._active_jobs = active_jobs[:]
        return active_jobs

    def waiting_for_job(self, depends_on):
        """ check if any of the running jobs are in the depends list

        Args:
            depends_on (list obj): list of steps to check against

        Returns:
            boolean, True if outstanding dependencies
        """
        # A dependency step still has an active (running/queueing) job?
        for depend_on in depends_on:
            for active_job in self._active_jobs:
                if active_job.active and depend_on.name == active_job.step_name:
                    return True
        # A dependency step that has not produced any job at all yet?
        for depend_on in depends_on:
            if not any(job.step_name == depend_on.name for job in self._jobs):
                # Fix: the original message read a loop variable that could
                # be unbound (or point at an unrelated job) here.
                print("waiting for step {} to start and finish".format(depend_on.name))
                return True
        # We are not waiting for any active or yet-to-be-run steps.
        return False

    def failed_dependency_jobs(self, depends_on):
        """ check if any of the jobs this one depends on have failed for good.

        Args:
            depends_on (list obj): list of steps to check against

        Returns:
            boolean, True if one or more dependency job has failed and cannot be restarted
        """
        for depend_on in depends_on:
            for active_job in self._active_jobs:
                # Fix: restrict the check to jobs that belong to the
                # dependency step; the original ignored depend_on and
                # flagged *any* NO_RESTART job in the pipeline.
                if (active_job.step_name == depend_on.name and
                        active_job.status == Job_status.NO_RESTART):
                    print("dependecy {} failed".format(active_job.step_name))
                    return True
        return False

    def _next_id(self):
        ''' generates and returns the next job id from the class

        Fix: the original was declared without ``self`` yet used it, so any
        call raised a TypeError.

        Returns:
            Next available job id (int)
        '''
        self.job_id += 1
        return self.job_id
|
normal
|
{
"blob_id": "222a02f97df5ded6fea49e9eb201ed784a2a2423",
"index": 5037,
"step-1": "#\n# \n# \n##\n\nfrom __future__ import print_function, unicode_literals\nimport inspect\nimport os\nimport pprint as pp\nimport time\nfrom time import gmtime, strftime\nimport subprocess\n\nfrom local import *\nfrom slurm import *\n\nclass Job_status( object ):\n \"\"\" Enumerate class for job statuses, this is done differently in python 3\n\n \"\"\"\n FINISHED = 1\n FAILED = 2\n NO_RESTART = 3\n RUNNING = 4\n QUEUEING = 5\n RESUBMITTED = 6\n SUBMITTED = 7\n CREATED = 98\n KILLED = 99\n UNKNOWN = 100\n\n\nclass Job(object):\n \"\"\" This class is presenting a singular job and all information associated with it. \n\n \"\"\"\n\n\n def __init__(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None):\n \"\"\" Create a job object\n \n Args:\n cmd (str): command to run\n step_name (str): name of the step that this command belongs to\n output (str): output information to pass on to the next job\n limit (str): paramters to pass on to the backend\n delete_file (str): File(s) to delete if the job is successful\n thread_id (int): id of the thread running this \n\n Returns:\n job (obj)\n \"\"\"\n\n self.status = Job_status.CREATED\n self.active = True\n self.command = None\n self.backend = None\n\n self.output = output\n self.step_name = None\n self.pre_task_ids = None\n self.delete_file = None\n self.job_id = None\n self.backend_id = None\n self.nr_of_tries = 0\n\n self.cmd = cmd\n self.step_name = step_name\n self.max_memory = None\n self.cputime = None\n\n\n if ( limit is not None ):\n self.limit = limit\n\n if ( delete_file is not None ):\n self.delete_file = delete_file\n\n if ( thread_id is not None ):\n self.thread_id = thread_id\n\n\n def __getitem__(self, item):\n \"\"\" Generic getter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n \n if ( item.startswith(\"_\")):\n raise AttributeError\n\n try:\n return getattr(self, item)\n except KeyError:\n raise 
AttributeError\n\n def __setitem__(self, item, value):\n \"\"\" Generic setter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n\n if ( item.startswith(\"_\")):\n raise AttributeError\n \n try:\n return setattr(self, item, value)\n except KeyError:\n raise AttributeError\n\n def __repr__(self):\n return \"{name} -> {status}\".format( name=self.step_name, status=self.status )\n\n def __str__(self):\n return \"{name}\".format( name=self.step_name )\n\n \n def delete_tmp_files(self):\n \"\"\" deletes tmp files \n\n Args:\n None\n\n Returns:\n boolean: Success/failure\n\n Raises:\n None\n \"\"\"\n\n if self.delete_file is None:\n return True\n\n if ( isinstance(self.delete_file, str)):\n self.delete_file = [ self.delete_file ] \n \n for file_name in self.delete_file:\n print( file_name)\n if ( os.path.isfile( file_name )):\n os.remove( file_name )\n \n return True\n \n\n\nclass Thread( object):\n\n def __init__( self, name, thread_id ):\n self.name = name\n self.thread_id = thread_id\n \n\n def __getitem__(self, item):\n \"\"\" Generic getter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n \n if ( item.startswith(\"_\")):\n raise AttributeError\n\n try:\n return getattr(self, item)\n except KeyError:\n raise AttributeError\n\n def __setitem__(self, item, value):\n \"\"\" Generic setter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n\n if ( item.startswith(\"_\")):\n raise AttributeError\n \n try:\n return setattr(self, item, value)\n except KeyError:\n raise AttributeError\n\n\nclass Manager( object ):\n\n\n def __init__(self, pipeline):\n \"\"\" Creates a manager object\n\n \"\"\"\n self._jobs = []\n self._active_jobs = []\n\n self._threads = []\n self._thread_index = {}\n self._thread_id = 1\n\n\n self.local_backend = Local()\n self.backend = None\n\n 
self.pipeline = pipeline\n\n\n def __getitem__(self, item):\n \"\"\" Generic getter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n \n if ( item.startswith(\"_\")):\n raise AttributeError\n\n try:\n return getattr(self, item)\n except KeyError:\n raise AttributeError\n\n def __setitem__(self, item, value):\n \"\"\" Generic setter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n\n if ( item.startswith(\"_\")):\n raise AttributeError\n \n try:\n return setattr(self, item, value)\n except KeyError:\n raise AttributeError\n\n def add_thread(self, name):\n \"\"\" Create a new thread object for the manager\n\n Args:\n name (str): name of the thread\n\n Returns:\n None\n\n \"\"\"\n\n thread = Thread( name=name, thread_id=self._thread_id)\n self._threads.append( thread )\n self._thread_index[ name ] = self._thread_id \n\n self._thread_id += 1\n\n\n def get_thread_by_name( self, name):\n \"\"\" gets a thread object based on name\n\n Args:\n name (str): name of the thread\n\n Returns:\n thread (obj)\n\n Raises:\n raises an assert error if the thead does not exist\n\n \"\"\"\n\n assert name in self._thread_index, \"No thread named {}\".format( name )\n\n return self._threads[ self._thread_index[ name ]]\n\n\n \n def submit_job(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None, system_call=False):\n \"\"\" Submits a job using the selected backend, setting up the tracking and all that jazz\n \n Args:\n cmd (str): command to run\n step_name (str): name of the step that this command belongs to\n output (str): output information to pass on to the next job\n limit (str): paramters to pass on to the backend\n delete_file (str): File(s) to delete if the job is successful\n thread_id (int): id of the thread running this \n system_call (bool): run the job as a system job (default: false )\n\n Returns:\n None\n \n 
\"\"\"\n\n job = Job(cmd, step_name, output, limit, delete_file, thread_id)\n self._jobs.append( job )\n job.job_id = len( self._jobs) - 1\n\n# print( \"Working on: '{}' -> {}\".format( job.step_name, job.cmd ))\n\n if ( system_call ) :\n job = self.local_backend.system_call( job )\n else:\n job = self.backend.submit( job )\n\n# print( job.status )\n \n\n \n def resubmit_job(self, job):\n \"\"\" resubmits a job\n \n Args:\n job (job): jobid to resubmit\n\n \"\"\"\n\n job.nr_of_tries += 1\n job.status = Job_status.RESUBMITTED\n job = self.backend.submit( job )\n\n\n def killall(self):\n \"\"\"kills all submitted/running jobs\n \n \"\"\"\n\n for job_id, job in self.jobs:\n backend.kill( job )\n\n\n\n def job_outputs( self, step_name=None):\n \"\"\"\n \n Args:\n step_name (str): name of the step to collect outputs from\n\n Returns:\n list of outputs\n\n \"\"\"\n\n outputs = []\n prev_steps = self.pipeline._workflow.prev_steps( step_name )\n# print(\"{} :: Prev steps to collect outputs from: {}\".format( step_name, prev_steps))\n for job in self._jobs:\n if job.step_name in prev_steps:\n outputs.append( job.output )\n\n\n# print(\"{}\".format( outputs))\n return outputs\n\n\n def format_memory(self, memory):\n \"\"\" Format memory into a more readable format\n\n Args:\n memory (int): will be cast to float anyway\n\n Returns\n Readable memory (str)\n \"\"\"\n\n memory = float( memory) \n\n if memory is None or memory == 0:\n return \"N/A\"\n elif ( memory > 1000000000):\n return \"{:.2f}GB\".format(memory / 1000000000)\n elif ( memory > 1000000):\n return \"{:.2f}MB\".format(memory / 1000000) \n elif ( memory > 1000):\n return \"{:.2f}KB\".format(memory / 1000) \n else:\n return \"{:}\".format(int(memory))\n\n\n\n def format_time( self, seconds):\n \"\"\" Markes seconds into a more readable format eg: 10:03:01\n\n Args\n Seconds (int): seconds to convert into hours:mins:seconds\n\n returns:\n time (str)\n \"\"\"\n\n if seconds is None:\n return \"N/A\"\n\n seconds = 
int( seconds )\n\n hours = int(seconds / 3600)\n seconds -= hours * 3600\n\n minutes = int(seconds / 60)\n seconds -= minutes * 60\n seconds = int(seconds )\n\n\n return \"{:02}:{:02}:{:02}\".format( hours, minutes, seconds)\n\n\n\n def report(self):\n \"\"\" print the current progress\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, 
key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))\n \n\n \n\n\n\n def active_jobs(self):\n \"\"\" updates the status of and returns all active jobs \n\n Args:\n None\n\n Returns:\n list of jobs (obj)\n \n \"\"\"\n \n active_jobs = []\n for job in self._jobs:\n if job.active:\n job.backend.status( job )\n active_jobs.append( job )\n\n self._active_jobs = active_jobs[:]\n\n return active_jobs\n\n\n def waiting_for_job(self, depends_on ):\n \"\"\" check if any of the running jobs are in the depends list \n\n Args:\n depends_on (list obj): list of steps to check again\n\n Returns:\n boolean, True if outstanding dependencies\n\n \"\"\"\n\n # This code is aweful, but I don't have to time and brain\n # power to fix it right now\n\n for depend_on in depends_on:\n for active_job in self._active_jobs:\n if (active_job.active and \n depend_on.name == active_job.step_name ):\n# print(\"waiting on {}\".format(active_job.step_name))\n return True\n\n\n\n for depend_on in depends_on:\n job_found = False\n for job in self._jobs:\n if (depend_on.name == job.step_name ):\n job_found = True\n\n if not job_found:\n print(\"{} is waiting to start and finish {}\".format( job.step_name, depend_on.name ))\n return True\n\n\n # We are not waiting for any active or steps yet to be performed\n return False\n\n\n\n def failed_dependency_jobs(self, depends_on ):\n \"\"\" check if any of the running jobs this one depends on have failed.\n\n Args:\n depends_on (list obj): list of steps to check again\n\n Returns:\n boolean, True if one or more job has failed and cannot be 
restarted\n\n \"\"\"\n\n for depend_on in depends_on:\n for active_job in self._active_jobs:\n if (active_job.status == Job_status.NO_RESTART):\n print(\"dependecy {} failed\".format(active_job.step_name))\n return True\n\n return False\n \n\n\n\n def _next_id():\n\t''' generates and returns the next job id from the class\n\n\tReturns:\n Next available job id (int)\n\n\t'''\n self.job_id += 1\n\n return self.job_id\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.