| code (string, lengths 13–6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1–5) |
|---|---|---|---|
<|reserved_special_token_0|>
def easy():
print('Ok, seems like you are not good at math.')
print('What about this.')
print('Say you have 10 apples, your Mom gave you another 2.')
print('How many apples you have now?')
choice = input('> ')
if choice == '12':
print('You did a good job!')
exit(0)
else:
print("Oh well, it's not end of the world if you did badly in math")
exit(0)
def start():
print("Let's do some math")
print('How old are you?')
choice = input('> ')
age = int(choice) + 20
print(f"So after 20 years, you'll be {age}, right? (y/n)")
choice = input('> ')
while True:
if 'y' in choice:
hard()
elif 'n' in choice:
easy()
else:
print("I don't know what that mean")
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def hard():
print("Nice! Let's try something harder")
print('Could you calculate this for me?')
print('4 * 35 + 18 / 2 = ')
aws = input('>')
while True:
if aws == '176':
print('Nice, you correctly answer all the questions')
exit(0)
else:
print("Ummm not quite right, let's try something easier")
easy()
def easy():
print('Ok, seems like you are not good at math.')
print('What about this.')
print('Say you have 10 apples, your Mom gave you another 2.')
print('How many apples you have now?')
choice = input('> ')
if choice == '12':
print('You did a good job!')
exit(0)
else:
print("Oh well, it's not end of the world if you did badly in math")
exit(0)
def start():
print("Let's do some math")
print('How old are you?')
choice = input('> ')
age = int(choice) + 20
print(f"So after 20 years, you'll be {age}, right? (y/n)")
choice = input('> ')
while True:
if 'y' in choice:
hard()
elif 'n' in choice:
easy()
else:
print("I don't know what that mean")
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def hard():
print("Nice! Let's try something harder")
print('Could you calculate this for me?')
print('4 * 35 + 18 / 2 = ')
aws = input('>')
while True:
if aws == '176':
print('Nice, you correctly answer all the questions')
exit(0)
else:
print("Ummm not quite right, let's try something easier")
easy()
def easy():
print('Ok, seems like you are not good at math.')
print('What about this.')
print('Say you have 10 apples, your Mom gave you another 2.')
print('How many apples you have now?')
choice = input('> ')
if choice == '12':
print('You did a good job!')
exit(0)
else:
print("Oh well, it's not end of the world if you did badly in math")
exit(0)
def start():
print("Let's do some math")
print('How old are you?')
choice = input('> ')
age = int(choice) + 20
print(f"So after 20 years, you'll be {age}, right? (y/n)")
choice = input('> ')
while True:
if 'y' in choice:
hard()
elif 'n' in choice:
easy()
else:
print("I don't know what that mean")
start()
<|reserved_special_token_1|>
from sys import exit
def hard():
print("Nice! Let's try something harder")
print('Could you calculate this for me?')
print('4 * 35 + 18 / 2 = ')
aws = input('>')
while True:
if aws == '176':
print('Nice, you correctly answer all the questions')
exit(0)
else:
print("Ummm not quite right, let's try something easier")
easy()
def easy():
print('Ok, seems like you are not good at math.')
print('What about this.')
print('Say you have 10 apples, your Mom gave you another 2.')
print('How many apples you have now?')
choice = input('> ')
if choice == '12':
print('You did a good job!')
exit(0)
else:
print("Oh well, it's not end of the world if you did badly in math")
exit(0)
def start():
print("Let's do some math")
print('How old are you?')
choice = input('> ')
age = int(choice) + 20
print(f"So after 20 years, you'll be {age}, right? (y/n)")
choice = input('> ')
while True:
if 'y' in choice:
hard()
elif 'n' in choice:
easy()
else:
print("I don't know what that mean")
start()
<|reserved_special_token_1|>
from sys import exit
def hard():
print("Nice! Let's try something harder")
print("Could you calculate this for me?")
print("4 * 35 + 18 / 2 = ")
aws = input(">")
while True:
if aws == "176":
print("Nice, you correctly answer all the questions")
exit(0)
else:
print("Ummm not quite right, let's try something easier")
easy()
def easy():
print("Ok, seems like you are not good at math.")
print("What about this.")
print("Say you have 10 apples, your Mom gave you another 2.")
print("How many apples you have now?")
choice = input("> ")
if choice == "12":
print("You did a good job!")
exit(0)
else:
print("Oh well, it's not end of the world if you did badly in math")
exit(0)
def start():
print("Let's do some math")
print("How old are you?")
choice = input("> ")
age = int(choice) + 20
print(f"So after 20 years, you'll be {age}, right? (y/n)")
choice = input("> ")
while True:
if "y" in choice:
hard()
elif "n" in choice:
easy()
else:
print("I don't know what that mean")
start()
|
flexible
|
{
"blob_id": "5d05351cd6cd6c0d216e8bc09308532605bfd26e",
"index": 3007,
"step-1": "<mask token>\n\n\ndef easy():\n print('Ok, seems like you are not good at math.')\n print('What about this.')\n print('Say you have 10 apples, your Mom gave you another 2.')\n print('How many apples you have now?')\n choice = input('> ')\n if choice == '12':\n print('You did a good job!')\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print('How old are you?')\n choice = input('> ')\n age = int(choice) + 20\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n choice = input('> ')\n while True:\n if 'y' in choice:\n hard()\n elif 'n' in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef hard():\n print(\"Nice! Let's try something harder\")\n print('Could you calculate this for me?')\n print('4 * 35 + 18 / 2 = ')\n aws = input('>')\n while True:\n if aws == '176':\n print('Nice, you correctly answer all the questions')\n exit(0)\n else:\n print(\"Ummm not quite right, let's try something easier\")\n easy()\n\n\ndef easy():\n print('Ok, seems like you are not good at math.')\n print('What about this.')\n print('Say you have 10 apples, your Mom gave you another 2.')\n print('How many apples you have now?')\n choice = input('> ')\n if choice == '12':\n print('You did a good job!')\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print('How old are you?')\n choice = input('> ')\n age = int(choice) + 20\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n choice = input('> ')\n while True:\n if 'y' in choice:\n hard()\n elif 'n' in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef hard():\n print(\"Nice! Let's try something harder\")\n print('Could you calculate this for me?')\n print('4 * 35 + 18 / 2 = ')\n aws = input('>')\n while True:\n if aws == '176':\n print('Nice, you correctly answer all the questions')\n exit(0)\n else:\n print(\"Ummm not quite right, let's try something easier\")\n easy()\n\n\ndef easy():\n print('Ok, seems like you are not good at math.')\n print('What about this.')\n print('Say you have 10 apples, your Mom gave you another 2.')\n print('How many apples you have now?')\n choice = input('> ')\n if choice == '12':\n print('You did a good job!')\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print('How old are you?')\n choice = input('> ')\n age = int(choice) + 20\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n choice = input('> ')\n while True:\n if 'y' in choice:\n hard()\n elif 'n' in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\nstart()\n",
"step-4": "from sys import exit\n\n\ndef hard():\n print(\"Nice! Let's try something harder\")\n print('Could you calculate this for me?')\n print('4 * 35 + 18 / 2 = ')\n aws = input('>')\n while True:\n if aws == '176':\n print('Nice, you correctly answer all the questions')\n exit(0)\n else:\n print(\"Ummm not quite right, let's try something easier\")\n easy()\n\n\ndef easy():\n print('Ok, seems like you are not good at math.')\n print('What about this.')\n print('Say you have 10 apples, your Mom gave you another 2.')\n print('How many apples you have now?')\n choice = input('> ')\n if choice == '12':\n print('You did a good job!')\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print('How old are you?')\n choice = input('> ')\n age = int(choice) + 20\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n choice = input('> ')\n while True:\n if 'y' in choice:\n hard()\n elif 'n' in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\nstart()\n",
"step-5": "from sys import exit\n\n\ndef hard():\n print(\"Nice! Let's try something harder\")\n print(\"Could you calculate this for me?\")\n print(\"4 * 35 + 18 / 2 = \")\n\n aws = input(\">\")\n\n while True:\n if aws == \"176\":\n print(\"Nice, you correctly answer all the questions\")\n exit(0)\n else:\n print(\"Ummm not quite right, let's try something easier\")\n easy()\n\n\ndef easy():\n print(\"Ok, seems like you are not good at math.\")\n print(\"What about this.\")\n print(\"Say you have 10 apples, your Mom gave you another 2.\")\n print(\"How many apples you have now?\")\n\n choice = input(\"> \")\n\n if choice == \"12\":\n print(\"You did a good job!\")\n exit(0)\n else:\n print(\"Oh well, it's not end of the world if you did badly in math\")\n exit(0)\n\n\ndef start():\n print(\"Let's do some math\")\n print(\"How old are you?\")\n\n choice = input(\"> \")\n age = int(choice) + 20\n\n print(f\"So after 20 years, you'll be {age}, right? (y/n)\")\n\n choice = input(\"> \")\n\n while True:\n if \"y\" in choice:\n hard()\n elif \"n\" in choice:\n easy()\n else:\n print(\"I don't know what that mean\")\n\n\nstart()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
import tensorflow as tf
from arg_parser import args
from model_object import UnetModel
def main(args):
np.random.seed(args.random_seed)
tf.random.set_seed(args.random_seed)
unet_model = UnetModel(args)
unet_model.prepare_data(args)
unet_model.create_model(args)
unet_model.train(args)
unet_model.load_best_model(args, load_dir= args.savedir)
unet_model.evaluate(args)
if __name__ == "__main__":
main(args)
|
normal
|
{
"blob_id": "588f6f78908e47e0b3f1bc42fffabad34766eede",
"index": 9815,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\nif __name__ == '__main__':\n main(args)\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nfrom arg_parser import args\nfrom model_object import UnetModel\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\nif __name__ == '__main__':\n main(args)\n",
"step-5": "import numpy as np\nimport tensorflow as tf\n\nfrom arg_parser import args\nfrom model_object import UnetModel\n\ndef main(args):\n \n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n\n unet_model = UnetModel(args) \n\n unet_model.prepare_data(args)\n\n unet_model.create_model(args)\n\n unet_model.train(args)\n\n unet_model.load_best_model(args, load_dir= args.savedir)\n\n unet_model.evaluate(args)\n\nif __name__ == \"__main__\":\n main(args)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""A lightweight Python wrapper of SoX's effects."""
import shlex
from io import BufferedReader, BufferedWriter
from subprocess import PIPE, Popen
import numpy as np
from .sndfiles import (
FileBufferInput,
FileBufferOutput,
FilePathInput,
FilePathOutput,
NumpyArrayInput,
NumpyArrayOutput,
logger,
)
def mutually_exclusive(*args):
return sum(arg is not None for arg in args) < 2
class AudioEffectsChain:
def __init__(self):
self.command = []
def equalizer(self, frequency, q=1.0, db=-3.0):
"""equalizer takes three parameters: filter center frequency in Hz, "q"
or band-width (default=1.0), and a signed number for gain or
attenuation in dB.
Beware of clipping when using positive gain.
"""
self.command.append('equalizer')
self.command.append(frequency)
self.command.append(str(q) + 'q')
self.command.append(db)
return self
def bandpass(self, frequency, q=1.0):
"""bandpass takes 2 parameters: filter center frequency in Hz and "q"
or band-width (default=1.0).
It gradually removes frequencies outside the band specified.
"""
self.command.append('bandpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def bandreject(self, frequency, q=1.0):
"""bandreject takes 2 parameters: filter center frequency in Hz and "q"
or band-width (default=1.0).
It gradually removes frequencies within the band specified.
"""
self.command.append('bandreject')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def lowshelf(self, gain=-20.0, frequency=100, slope=0.5):
"""lowshelf takes 3 parameters: a signed number for gain or attenuation
in dB, filter frequency in Hz and slope (default=0.5, maximum=1.0).
Beware of Clipping when using positive gain.
"""
self.command.append('bass')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self
def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):
"""highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain.
"""
self.command.append('treble')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self
def highpass(self, frequency, q=0.707):
"""highpass takes 2 parameters: filter frequency in Hz below which
frequencies will be attenuated and q (default=0.707).
Beware of clipping when using high q values.
"""
self.command.append('highpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def lowpass(self, frequency, q=0.707):
"""lowpass takes 2 parameters: filter frequency in Hz above which
frequencies will be attenuated and q (default=0.707).
Beware of clipping when using high q values.
"""
self.command.append('lowpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def limiter(self, gain=3.0):
"""limiter takes one parameter: gain in dB.
Beware of adding too much gain, as it can cause audible
distortion. See the compand effect for a more capable limiter.
"""
self.command.append('gain')
self.command.append('-l')
self.command.append(gain)
return self
def normalize(self):
"""normalize has no parameters.
It boosts level so that the loudest part of your file reaches
maximum, without clipping.
"""
self.command.append('gain')
self.command.append('-n')
return self
def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20, db_from=-20.0, db_to=-20.0):
"""compand takes 6 parameters:
attack (seconds), decay (seconds), soft_knee (ex. 6 results
in 6:1 compression ratio), threshold (a negative value
in dB), the level below which the signal will NOT be companded
(a negative value in dB), the level above which the signal will
NOT be companded (a negative value in dB). This effect
manipulates dynamic range of the input file.
"""
self.command.append('compand')
self.command.append(str(attack) + ',' + str(decay))
self.command.append(str(soft_knee) + ':' + str(threshold) + ',' + str(db_from) + ',' + str(db_to))
return self
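    # Illustrative sketch, not part of the original source: with hypothetical
    # settings, AudioEffectsChain().compand(attack=0.3, decay=0.8, soft_knee=4.0, threshold=-60)
    # appends ['compand', '0.3,0.8', '4.0:-60,-20.0,-20.0'] to the SoX argument list
    # (attack,decay followed by knee:threshold,from,to as built above).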
def sinc(self,
high_pass_frequency=None,
low_pass_frequency=None,
left_t=None,
left_n=None,
right_t=None,
right_n=None,
attenuation=None,
beta=None,
phase=None,
M=None,
I=None,
L=None):
"""sinc takes 12 parameters:
high_pass_frequency in Hz,
low_pass_frequency in Hz,
left_t,
left_n,
right_t,
right_n,
attenuation in dB,
beta,
phase,
M,
I,
L
This effect creates a steep bandpass or
bandreject filter. You may specify as few as the first two
parameters. Setting the high-pass parameter to a lower value
than the low-pass creates a band-reject filter.
"""
self.command.append("sinc")
if not mutually_exclusive(attenuation, beta):
raise ValueError("Attenuation (-a) and beta (-b) are mutually exclusive arguments.")
if attenuation is not None and beta is None:
self.command.append('-a')
self.command.append(str(attenuation))
elif attenuation is None and beta is not None:
self.command.append('-b')
self.command.append(str(beta))
if not mutually_exclusive(phase, M, I, L):
raise ValueError("Phase (-p), -M, L, and -I are mutually exclusive arguments.")
if phase is not None:
self.command.append('-p')
self.command.append(str(phase))
elif M is not None:
self.command.append('-M')
elif I is not None:
self.command.append('-I')
elif L is not None:
self.command.append('-L')
        if not mutually_exclusive(left_t, left_n):
raise ValueError("Transition bands options (-t or -n) are mutually exclusive.")
if left_t is not None:
self.command.append('-t')
self.command.append(str(left_t))
if left_n is not None:
self.command.append('-n')
self.command.append(str(left_n))
if high_pass_frequency is not None and low_pass_frequency is None:
self.command.append(str(high_pass_frequency))
elif high_pass_frequency is not None and low_pass_frequency is not None:
self.command.append(str(high_pass_frequency) + '-' + str(low_pass_frequency))
elif high_pass_frequency is None and low_pass_frequency is not None:
self.command.append(str(low_pass_frequency))
        if not mutually_exclusive(right_t, right_n):
raise ValueError("Transition bands options (-t or -n) are mutually exclusive.")
if right_t is not None:
self.command.append('-t')
self.command.append(str(right_t))
if right_n is not None:
self.command.append('-n')
self.command.append(str(right_n))
return self
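    # Illustrative sketch, not part of the original source: a steep 300 Hz to 3 kHz
    # band-pass could be requested with
    #     AudioEffectsChain().sinc(high_pass_frequency=300, low_pass_frequency=3000)
    # which appends ['sinc', '300-3000'] to the command list built above.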
def bend(self, bends, frame_rate=None, over_sample=None):
"""TODO Add docstring."""
self.command.append("bend")
if frame_rate is not None and isinstance(frame_rate, int):
self.command.append('-f %s' % frame_rate)
if over_sample is not None and isinstance(over_sample, int):
self.command.append('-o %s' % over_sample)
for bend in bends:
self.command.append(','.join(bend))
return self
def chorus(self, gain_in, gain_out, decays):
"""TODO Add docstring."""
self.command.append("chorus")
self.command.append(gain_in)
self.command.append(gain_out)
for decay in decays:
modulation = decay.pop()
numerical = decay
self.command.append(' '.join(map(str, numerical)) + ' -' + modulation)
return self
def delay(self,
gain_in=0.8,
gain_out=0.5,
delays=None,
decays=None,
parallel=False):
"""delay takes 4 parameters: input gain (max 1), output gain
and then two lists, delays and decays.
        Each list is a pair of comma separated values within
parenthesis.
"""
if delays is None:
delays = list((1000, 1800))
if decays is None:
decays = list((0.3, 0.25))
self.command.append('echo' + ('s' if parallel else ''))
self.command.append(gain_in)
self.command.append(gain_out)
self.command.extend(list(sum(zip(delays, decays), ())))
return self
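    # Illustrative sketch, not part of the original source: the defaults,
    #     AudioEffectsChain().delay(),
    # append ['echo', 0.8, 0.5, 1000, 0.3, 1800, 0.25], i.e. two taps at
    # 1000 ms / 0.3 gain and 1800 ms / 0.25 gain; parallel=True uses 'echos' instead.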
def echo(self, **kwargs):
"""TODO Add docstring."""
return self.delay(**kwargs)
def fade(self):
"""TODO Add docstring."""
raise NotImplementedError()
def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5, shape='sine', phase=25, interp='linear'):
"""TODO Add docstring."""
raise NotImplementedError()
def gain(self, db):
"""gain takes one paramter: gain in dB."""
self.command.append('gain')
self.command.append(db)
return self
def mcompand(self):
"""TODO Add docstring."""
raise NotImplementedError()
def noise_reduction(self, amount=0.5):
"""TODO Add docstring."""
# TODO Run sox once with noiseprof on silent portions to generate a noise profile.
raise NotImplementedError()
def oops(self):
"""TODO Add docstring."""
raise NotImplementedError()
def overdrive(self, gain=20, colour=20):
"""overdrive takes 2 parameters: gain in dB and colour which effects
the character of the distortion effet.
Both have a default value of 20. TODO - changing color does not seem to have an audible effect
"""
self.command.append('overdrive')
self.command.append(gain)
self.command.append(colour)
return self
def phaser(self,
gain_in=0.9,
gain_out=0.8,
delay=1,
decay=0.25,
speed=2,
triangular=False):
"""phaser takes 6 parameters: input gain (max 1.0), output gain (max
        1.0), delay, decay, speed and LFO shape=triangular (which must be set to
True or False)"""
self.command.append("phaser")
self.command.append(gain_in)
self.command.append(gain_out)
self.command.append(delay)
self.command.append(decay)
self.command.append(speed)
if triangular:
self.command.append('-t')
else:
self.command.append('-s')
return self
def pitch(self, shift,
use_tree=False,
segment=82,
search=14.68,
overlap=12):
"""pitch takes 4 parameters: user_tree (True or False), segment, search
and overlap."""
self.command.append("pitch")
if use_tree:
self.command.append('-q')
self.command.append(shift)
self.command.append(segment)
self.command.append(search)
self.command.append(overlap)
return self
def loop(self):
"""TODO Add docstring."""
self.command.append('repeat')
self.command.append('-')
return self
def reverb(self,
reverberance=50,
hf_damping=50,
room_scale=100,
stereo_depth=100,
pre_delay=20,
wet_gain=0,
wet_only=False):
"""reverb takes 7 parameters: reverberance, high-freqnency damping,
room scale, stereo depth, pre-delay, wet gain and wet only (True or
False)"""
self.command.append('reverb')
if wet_only:
self.command.append('-w')
self.command.append(reverberance)
self.command.append(hf_damping)
self.command.append(room_scale)
self.command.append(stereo_depth)
self.command.append(pre_delay)
self.command.append(wet_gain)
return self
def reverse(self):
"""reverse takes no parameters.
It plays the input sound backwards.
"""
self.command.append("reverse")
return self
def speed(self, factor, use_semitones=False):
"""speed takes 2 parameters: factor and use-semitones (True or False).
When use-semitones = False, a factor of 2 doubles the speed and raises the pitch an octave. The same result is achieved with factor = 1200 and use semitones = True.
"""
self.command.append("speed")
self.command.append(factor if not use_semitones else str(factor) + "c")
return self
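    # Illustrative sketch, not part of the original source:
    #     AudioEffectsChain().speed(2)            appends ['speed', 2]
    #     AudioEffectsChain().speed(1200, True)   appends ['speed', '1200c']
    # where the 'c' suffix tells SoX to read the factor in cents (1200 cents = one octave).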
def synth(self):
raise NotImplementedError()
def tempo(self,
factor,
use_tree=False,
opt_flag=None,
segment=82,
search=14.68,
overlap=12):
"""tempo takes 6 parameters: factor, use tree (True or False), option
flag, segment, search and overlap).
This effect changes the duration of the sound without modifying
pitch.
"""
self.command.append("tempo")
if use_tree:
self.command.append('-q')
if opt_flag in ('l', 'm', 's'):
self.command.append('-%s' % opt_flag)
self.command.append(factor)
self.command.append(segment)
self.command.append(search)
self.command.append(overlap)
return self
def tremolo(self, freq, depth=40):
"""tremolo takes two parameters: frequency and depth (max 100)"""
self.command.append("tremolo")
self.command.append(freq)
self.command.append(depth)
return self
def trim(self, positions):
"""TODO Add docstring."""
self.command.append("trim")
for position in positions:
# TODO: check if the position means something
self.command.append(position)
return self
def upsample(self, factor):
"""TODO Add docstring."""
self.command.append("upsample")
self.command.append(factor)
return self
def vad(self):
raise NotImplementedError()
def vol(self, gain, type="amplitude", limiter_gain=None):
"""vol takes three parameters: gain, gain-type (amplitude, power or dB)
and limiter gain."""
self.command.append("vol")
if type in ["amplitude", "power", "dB"]:
self.command.append(type)
else:
raise ValueError("Type has to be dB, amplitude or power.")
if limiter_gain is not None:
self.command.append(str(limiter_gain))
print(self.command)
return self
def custom(self, command):
"""Run arbitrary SoX effect commands.
Examples:
custom('echo 0.8 0.9 1000 0.3') for an echo effect.
References:
- https://linux.die.net/man/1/soxexam
- http://sox.sourceforge.net/sox.html
- http://tldp.org/LDP/LG/issue73/chung.html
- http://dsl.org/cookbook/cookbook_29.html
"""
self.command.append(command)
return self
def __call__(
self,
src,
dst=np.ndarray,
sample_in=44100, # used only for arrays
sample_out=None,
encoding_out=None,
channels_out=None,
allow_clipping=True):
# depending on the input, using the right object to set up the input data arguments
stdin = None
if isinstance(src, str):
infile = FilePathInput(src)
stdin = src
elif isinstance(src, np.ndarray):
infile = NumpyArrayInput(src, sample_in)
stdin = src
elif isinstance(src, BufferedReader):
infile = FileBufferInput(src)
stdin = infile.data # retrieving the data from the file reader (np array)
else:
infile = None
# finding out which output encoding to use in case the output is ndarray
if encoding_out is None and dst is np.ndarray:
if isinstance(stdin, np.ndarray):
encoding_out = stdin.dtype.type
elif isinstance(stdin, str):
encoding_out = np.float32
# finding out which channel count to use (defaults to the input file's channel count)
if channels_out is None:
if infile is None:
channels_out = 1
else:
channels_out = infile.channels
if sample_out is None: # if the output samplerate isn't specified, default to input's
sample_out = sample_in
# same as for the input data, but for the destination
if isinstance(dst, str):
outfile = FilePathOutput(dst, sample_out, channels_out)
elif dst is np.ndarray:
outfile = NumpyArrayOutput(encoding_out, sample_out, channels_out)
elif isinstance(dst, BufferedWriter):
outfile = FileBufferOutput(dst, sample_out, channels_out)
else:
outfile = None
cmd = shlex.split(
' '.join([
'sox',
'-N',
'-V1' if allow_clipping else '-V2',
infile.cmd_prefix if infile is not None else '-d',
outfile.cmd_suffix if outfile is not None else '-d',
] + list(map(str, self.command))),
posix=False,
)
logger.debug("Running command : %s" % cmd)
if isinstance(stdin, np.ndarray):
stdout, stderr = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate(stdin.tobytes(order='F'))
else:
stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
if stderr:
raise RuntimeError(stderr.decode())
elif stdout:
outsound = np.frombuffer(stdout, dtype=encoding_out)
if channels_out > 1:
outsound = outsound.reshape((channels_out, int(len(outsound) / channels_out)), order='F')
if isinstance(outfile, FileBufferOutput):
outfile.write(outsound)
return outsound
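# Illustrative usage sketch, not part of the original source: assuming a working
# `sox` binary on PATH and the sndfiles helpers imported above, a chain can be
# composed fluently and applied to a file path, a numpy array, or a file object:
#
#     fx = AudioEffectsChain().highpass(80).reverb(reverberance=40).gain(-3)
#     fx('in.wav', 'out.wav')      # file to file
#     samples = fx('in.wav')       # file to numpy ndarray (float32 by default)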
|
normal
|
{
"blob_id": "f98f2ef0d94839711b473ad1ca32b85645d4014e",
"index": 8764,
"step-1": "<mask token>\n\n\nclass AudioEffectsChain:\n\n def __init__(self):\n self.command = []\n\n def equalizer(self, frequency, q=1.0, db=-3.0):\n \"\"\"equalizer takes three parameters: filter center frequency in Hz, \"q\"\n or band-width (default=1.0), and a signed number for gain or\n attenuation in dB.\n\n Beware of clipping when using positive gain.\n \"\"\"\n self.command.append('equalizer')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n self.command.append(db)\n return self\n <mask token>\n <mask token>\n\n def lowshelf(self, gain=-20.0, frequency=100, slope=0.5):\n \"\"\"lowshelf takes 3 parameters: a signed number for gain or attenuation\n in dB, filter frequency in Hz and slope (default=0.5, maximum=1.0).\n\n Beware of Clipping when using positive gain.\n \"\"\"\n self.command.append('bass')\n self.command.append(gain)\n self.command.append(frequency)\n self.command.append(slope)\n return self\n <mask token>\n\n def highpass(self, frequency, q=0.707):\n \"\"\"highpass takes 2 parameters: filter frequency in Hz below which\n frequencies will be attenuated and q (default=0.707).\n\n Beware of clipping when using high q values.\n \"\"\"\n self.command.append('highpass')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n <mask token>\n\n def limiter(self, gain=3.0):\n \"\"\"limiter takes one parameter: gain in dB.\n\n Beware of adding too much gain, as it can cause audible\n distortion. See the compand effect for a more capable limiter.\n \"\"\"\n self.command.append('gain')\n self.command.append('-l')\n self.command.append(gain)\n return self\n\n def normalize(self):\n \"\"\"normalize has no parameters.\n\n It boosts level so that the loudest part of your file reaches\n maximum, without clipping.\n \"\"\"\n self.command.append('gain')\n self.command.append('-n')\n return self\n <mask token>\n\n def sinc(self, high_pass_frequency=None, low_pass_frequency=None,\n left_t=None, left_n=None, right_t=None, right_n=None, attenuation=\n None, beta=None, phase=None, M=None, I=None, L=None):\n \"\"\"sinc takes 12 parameters:\n\n high_pass_frequency in Hz,\n low_pass_frequency in Hz,\n left_t,\n left_n,\n right_t,\n right_n,\n attenuation in dB,\n beta,\n phase,\n M,\n I,\n L\n\n This effect creates a steep bandpass or\n bandreject filter. You may specify as few as the first two\n parameters. 
Setting the high-pass parameter to a lower value\n than the low-pass creates a band-reject filter.\n \"\"\"\n self.command.append('sinc')\n if not mutually_exclusive(attenuation, beta):\n raise ValueError(\n 'Attenuation (-a) and beta (-b) are mutually exclusive arguments.'\n )\n if attenuation is not None and beta is None:\n self.command.append('-a')\n self.command.append(str(attenuation))\n elif attenuation is None and beta is not None:\n self.command.append('-b')\n self.command.append(str(beta))\n if not mutually_exclusive(phase, M, I, L):\n raise ValueError(\n 'Phase (-p), -M, L, and -I are mutually exclusive arguments.')\n if phase is not None:\n self.command.append('-p')\n self.command.append(str(phase))\n elif M is not None:\n self.command.append('-M')\n elif I is not None:\n self.command.append('-I')\n elif L is not None:\n self.command.append('-L')\n if not mutually_exclusive(left_t, left_t):\n raise ValueError(\n 'Transition bands options (-t or -n) are mutually exclusive.')\n if left_t is not None:\n self.command.append('-t')\n self.command.append(str(left_t))\n if left_n is not None:\n self.command.append('-n')\n self.command.append(str(left_n))\n if high_pass_frequency is not None and low_pass_frequency is None:\n self.command.append(str(high_pass_frequency))\n elif high_pass_frequency is not None and low_pass_frequency is not None:\n self.command.append(str(high_pass_frequency) + '-' + str(\n low_pass_frequency))\n elif high_pass_frequency is None and low_pass_frequency is not None:\n self.command.append(str(low_pass_frequency))\n if not mutually_exclusive(right_t, right_t):\n raise ValueError(\n 'Transition bands options (-t or -n) are mutually exclusive.')\n if right_t is not None:\n self.command.append('-t')\n self.command.append(str(right_t))\n if right_n is not None:\n self.command.append('-n')\n self.command.append(str(right_n))\n return self\n\n def bend(self, bends, frame_rate=None, over_sample=None):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('bend')\n if frame_rate is not None and isinstance(frame_rate, int):\n self.command.append('-f %s' % frame_rate)\n if over_sample is not None and isinstance(over_sample, int):\n self.command.append('-o %s' % over_sample)\n for bend in bends:\n self.command.append(','.join(bend))\n return self\n <mask token>\n\n def delay(self, gain_in=0.8, gain_out=0.5, delays=None, decays=None,\n parallel=False):\n \"\"\"delay takes 4 parameters: input gain (max 1), output gain\n and then two lists, delays and decays.\n\n Each list is a pair of comma seperated values within\n parenthesis.\n \"\"\"\n if delays is None:\n delays = list((1000, 1800))\n if decays is None:\n decays = list((0.3, 0.25))\n self.command.append('echo' + ('s' if parallel else ''))\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.extend(list(sum(zip(delays, decays), ())))\n return self\n <mask token>\n <mask token>\n\n def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5, shape\n ='sine', phase=25, interp='linear'):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n\n def gain(self, db):\n \"\"\"gain takes one paramter: gain in dB.\"\"\"\n self.command.append('gain')\n self.command.append(db)\n return self\n <mask token>\n <mask token>\n\n def oops(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n <mask token>\n\n def phaser(self, gain_in=0.9, gain_out=0.8, delay=1, decay=0.25, speed=\n 2, triangular=False):\n \"\"\"phaser takes 6 parameters: input gain (max 1.0), output gain (max\n 
1.0), delay, decay, speed and LFO shape=trianglar (which must be set to\n True or False)\"\"\"\n self.command.append('phaser')\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.append(delay)\n self.command.append(decay)\n self.command.append(speed)\n if triangular:\n self.command.append('-t')\n else:\n self.command.append('-s')\n return self\n\n def pitch(self, shift, use_tree=False, segment=82, search=14.68, overlap=12\n ):\n \"\"\"pitch takes 4 parameters: user_tree (True or False), segment, search\n and overlap.\"\"\"\n self.command.append('pitch')\n if use_tree:\n self.command.append('-q')\n self.command.append(shift)\n self.command.append(segment)\n self.command.append(search)\n self.command.append(overlap)\n return self\n\n def loop(self):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('repeat')\n self.command.append('-')\n return self\n\n def reverb(self, reverberance=50, hf_damping=50, room_scale=100,\n stereo_depth=100, pre_delay=20, wet_gain=0, wet_only=False):\n \"\"\"reverb takes 7 parameters: reverberance, high-freqnency damping,\n room scale, stereo depth, pre-delay, wet gain and wet only (True or\n False)\"\"\"\n self.command.append('reverb')\n if wet_only:\n self.command.append('-w')\n self.command.append(reverberance)\n self.command.append(hf_damping)\n self.command.append(room_scale)\n self.command.append(stereo_depth)\n self.command.append(pre_delay)\n self.command.append(wet_gain)\n return self\n\n def reverse(self):\n \"\"\"reverse takes no parameters.\n\n It plays the input sound backwards.\n \"\"\"\n self.command.append('reverse')\n return self\n\n def speed(self, factor, use_semitones=False):\n \"\"\"speed takes 2 parameters: factor and use-semitones (True or False).\n\n When use-semitones = False, a factor of 2 doubles the speed and raises the pitch an octave. The same result is achieved with factor = 1200 and use semitones = True.\n \"\"\"\n self.command.append('speed')\n self.command.append(factor if not use_semitones else str(factor) + 'c')\n return self\n\n def synth(self):\n raise NotImplementedError()\n <mask token>\n <mask token>\n\n def trim(self, positions):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('trim')\n for position in positions:\n self.command.append(position)\n return self\n\n def upsample(self, factor):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('upsample')\n self.command.append(factor)\n return self\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AudioEffectsChain:\n\n def __init__(self):\n self.command = []\n\n def equalizer(self, frequency, q=1.0, db=-3.0):\n \"\"\"equalizer takes three parameters: filter center frequency in Hz, \"q\"\n or band-width (default=1.0), and a signed number for gain or\n attenuation in dB.\n\n Beware of clipping when using positive gain.\n \"\"\"\n self.command.append('equalizer')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n self.command.append(db)\n return self\n <mask token>\n <mask token>\n\n def lowshelf(self, gain=-20.0, frequency=100, slope=0.5):\n \"\"\"lowshelf takes 3 parameters: a signed number for gain or attenuation\n in dB, filter frequency in Hz and slope (default=0.5, maximum=1.0).\n\n Beware of Clipping when using positive gain.\n \"\"\"\n self.command.append('bass')\n self.command.append(gain)\n self.command.append(frequency)\n self.command.append(slope)\n return self\n <mask token>\n\n def highpass(self, frequency, q=0.707):\n \"\"\"highpass takes 2 parameters: filter frequency in Hz below which\n frequencies will be attenuated and q (default=0.707).\n\n Beware of clipping when using high q values.\n \"\"\"\n self.command.append('highpass')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n <mask token>\n\n def limiter(self, gain=3.0):\n \"\"\"limiter takes one parameter: gain in dB.\n\n Beware of adding too much gain, as it can cause audible\n distortion. See the compand effect for a more capable limiter.\n \"\"\"\n self.command.append('gain')\n self.command.append('-l')\n self.command.append(gain)\n return self\n\n def normalize(self):\n \"\"\"normalize has no parameters.\n\n It boosts level so that the loudest part of your file reaches\n maximum, without clipping.\n \"\"\"\n self.command.append('gain')\n self.command.append('-n')\n return self\n\n def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20,\n db_from=-20.0, db_to=-20.0):\n \"\"\"compand takes 6 parameters:\n\n attack (seconds), decay (seconds), soft_knee (ex. 6 results\n in 6:1 compression ratio), threshold (a negative value\n in dB), the level below which the signal will NOT be companded\n (a negative value in dB), the level above which the signal will\n NOT be companded (a negative value in dB). This effect\n manipulates dynamic range of the input file.\n \"\"\"\n self.command.append('compand')\n self.command.append(str(attack) + ',' + str(decay))\n self.command.append(str(soft_knee) + ':' + str(threshold) + ',' +\n str(db_from) + ',' + str(db_to))\n return self\n\n def sinc(self, high_pass_frequency=None, low_pass_frequency=None,\n left_t=None, left_n=None, right_t=None, right_n=None, attenuation=\n None, beta=None, phase=None, M=None, I=None, L=None):\n \"\"\"sinc takes 12 parameters:\n\n high_pass_frequency in Hz,\n low_pass_frequency in Hz,\n left_t,\n left_n,\n right_t,\n right_n,\n attenuation in dB,\n beta,\n phase,\n M,\n I,\n L\n\n This effect creates a steep bandpass or\n bandreject filter. You may specify as few as the first two\n parameters. 
Setting the high-pass parameter to a lower value\n than the low-pass creates a band-reject filter.\n \"\"\"\n self.command.append('sinc')\n if not mutually_exclusive(attenuation, beta):\n raise ValueError(\n 'Attenuation (-a) and beta (-b) are mutually exclusive arguments.'\n )\n if attenuation is not None and beta is None:\n self.command.append('-a')\n self.command.append(str(attenuation))\n elif attenuation is None and beta is not None:\n self.command.append('-b')\n self.command.append(str(beta))\n if not mutually_exclusive(phase, M, I, L):\n raise ValueError(\n 'Phase (-p), -M, L, and -I are mutually exclusive arguments.')\n if phase is not None:\n self.command.append('-p')\n self.command.append(str(phase))\n elif M is not None:\n self.command.append('-M')\n elif I is not None:\n self.command.append('-I')\n elif L is not None:\n self.command.append('-L')\n if not mutually_exclusive(left_t, left_t):\n raise ValueError(\n 'Transition bands options (-t or -n) are mutually exclusive.')\n if left_t is not None:\n self.command.append('-t')\n self.command.append(str(left_t))\n if left_n is not None:\n self.command.append('-n')\n self.command.append(str(left_n))\n if high_pass_frequency is not None and low_pass_frequency is None:\n self.command.append(str(high_pass_frequency))\n elif high_pass_frequency is not None and low_pass_frequency is not None:\n self.command.append(str(high_pass_frequency) + '-' + str(\n low_pass_frequency))\n elif high_pass_frequency is None and low_pass_frequency is not None:\n self.command.append(str(low_pass_frequency))\n if not mutually_exclusive(right_t, right_t):\n raise ValueError(\n 'Transition bands options (-t or -n) are mutually exclusive.')\n if right_t is not None:\n self.command.append('-t')\n self.command.append(str(right_t))\n if right_n is not None:\n self.command.append('-n')\n self.command.append(str(right_n))\n return self\n\n def bend(self, bends, frame_rate=None, over_sample=None):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('bend')\n if frame_rate is not None and isinstance(frame_rate, int):\n self.command.append('-f %s' % frame_rate)\n if over_sample is not None and isinstance(over_sample, int):\n self.command.append('-o %s' % over_sample)\n for bend in bends:\n self.command.append(','.join(bend))\n return self\n\n def chorus(self, gain_in, gain_out, decays):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('chorus')\n self.command.append(gain_in)\n self.command.append(gain_out)\n for decay in decays:\n modulation = decay.pop()\n numerical = decay\n self.command.append(' '.join(map(str, numerical)) + ' -' +\n modulation)\n return self\n\n def delay(self, gain_in=0.8, gain_out=0.5, delays=None, decays=None,\n parallel=False):\n \"\"\"delay takes 4 parameters: input gain (max 1), output gain\n and then two lists, delays and decays.\n\n Each list is a pair of comma seperated values within\n parenthesis.\n \"\"\"\n if delays is None:\n delays = list((1000, 1800))\n if decays is None:\n decays = list((0.3, 0.25))\n self.command.append('echo' + ('s' if parallel else ''))\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.extend(list(sum(zip(delays, decays), ())))\n return self\n\n def echo(self, **kwargs):\n \"\"\"TODO Add docstring.\"\"\"\n return self.delay(**kwargs)\n <mask token>\n\n def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5, shape\n ='sine', phase=25, interp='linear'):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n\n def gain(self, db):\n \"\"\"gain takes one 
paramter: gain in dB.\"\"\"\n self.command.append('gain')\n self.command.append(db)\n return self\n\n def mcompand(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n <mask token>\n\n def oops(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n <mask token>\n\n def phaser(self, gain_in=0.9, gain_out=0.8, delay=1, decay=0.25, speed=\n 2, triangular=False):\n \"\"\"phaser takes 6 parameters: input gain (max 1.0), output gain (max\n 1.0), delay, decay, speed and LFO shape=trianglar (which must be set to\n True or False)\"\"\"\n self.command.append('phaser')\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.append(delay)\n self.command.append(decay)\n self.command.append(speed)\n if triangular:\n self.command.append('-t')\n else:\n self.command.append('-s')\n return self\n\n def pitch(self, shift, use_tree=False, segment=82, search=14.68, overlap=12\n ):\n \"\"\"pitch takes 4 parameters: user_tree (True or False), segment, search\n and overlap.\"\"\"\n self.command.append('pitch')\n if use_tree:\n self.command.append('-q')\n self.command.append(shift)\n self.command.append(segment)\n self.command.append(search)\n self.command.append(overlap)\n return self\n\n def loop(self):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('repeat')\n self.command.append('-')\n return self\n\n def reverb(self, reverberance=50, hf_damping=50, room_scale=100,\n stereo_depth=100, pre_delay=20, wet_gain=0, wet_only=False):\n \"\"\"reverb takes 7 parameters: reverberance, high-freqnency damping,\n room scale, stereo depth, pre-delay, wet gain and wet only (True or\n False)\"\"\"\n self.command.append('reverb')\n if wet_only:\n self.command.append('-w')\n self.command.append(reverberance)\n self.command.append(hf_damping)\n self.command.append(room_scale)\n self.command.append(stereo_depth)\n self.command.append(pre_delay)\n self.command.append(wet_gain)\n return self\n\n def reverse(self):\n \"\"\"reverse takes no parameters.\n\n It plays the input sound backwards.\n \"\"\"\n self.command.append('reverse')\n return self\n\n def speed(self, factor, use_semitones=False):\n \"\"\"speed takes 2 parameters: factor and use-semitones (True or False).\n\n When use-semitones = False, a factor of 2 doubles the speed and raises the pitch an octave. The same result is achieved with factor = 1200 and use semitones = True.\n \"\"\"\n self.command.append('speed')\n self.command.append(factor if not use_semitones else str(factor) + 'c')\n return self\n\n def synth(self):\n raise NotImplementedError()\n <mask token>\n\n def tremolo(self, freq, depth=40):\n \"\"\"tremolo takes two parameters: frequency and depth (max 100)\"\"\"\n self.command.append('tremolo')\n self.command.append(freq)\n self.command.append(depth)\n return self\n\n def trim(self, positions):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('trim')\n for position in positions:\n self.command.append(position)\n return self\n\n def upsample(self, factor):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('upsample')\n self.command.append(factor)\n return self\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AudioEffectsChain:\n\n def __init__(self):\n self.command = []\n\n def equalizer(self, frequency, q=1.0, db=-3.0):\n \"\"\"equalizer takes three parameters: filter center frequency in Hz, \"q\"\n or band-width (default=1.0), and a signed number for gain or\n attenuation in dB.\n\n Beware of clipping when using positive gain.\n \"\"\"\n self.command.append('equalizer')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n self.command.append(db)\n return self\n <mask token>\n <mask token>\n\n def lowshelf(self, gain=-20.0, frequency=100, slope=0.5):\n \"\"\"lowshelf takes 3 parameters: a signed number for gain or attenuation\n in dB, filter frequency in Hz and slope (default=0.5, maximum=1.0).\n\n Beware of Clipping when using positive gain.\n \"\"\"\n self.command.append('bass')\n self.command.append(gain)\n self.command.append(frequency)\n self.command.append(slope)\n return self\n <mask token>\n\n def highpass(self, frequency, q=0.707):\n \"\"\"highpass takes 2 parameters: filter frequency in Hz below which\n frequencies will be attenuated and q (default=0.707).\n\n Beware of clipping when using high q values.\n \"\"\"\n self.command.append('highpass')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n <mask token>\n\n def limiter(self, gain=3.0):\n \"\"\"limiter takes one parameter: gain in dB.\n\n Beware of adding too much gain, as it can cause audible\n distortion. See the compand effect for a more capable limiter.\n \"\"\"\n self.command.append('gain')\n self.command.append('-l')\n self.command.append(gain)\n return self\n\n def normalize(self):\n \"\"\"normalize has no parameters.\n\n It boosts level so that the loudest part of your file reaches\n maximum, without clipping.\n \"\"\"\n self.command.append('gain')\n self.command.append('-n')\n return self\n\n def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20,\n db_from=-20.0, db_to=-20.0):\n \"\"\"compand takes 6 parameters:\n\n attack (seconds), decay (seconds), soft_knee (ex. 6 results\n in 6:1 compression ratio), threshold (a negative value\n in dB), the level below which the signal will NOT be companded\n (a negative value in dB), the level above which the signal will\n NOT be companded (a negative value in dB). This effect\n manipulates dynamic range of the input file.\n \"\"\"\n self.command.append('compand')\n self.command.append(str(attack) + ',' + str(decay))\n self.command.append(str(soft_knee) + ':' + str(threshold) + ',' +\n str(db_from) + ',' + str(db_to))\n return self\n\n def sinc(self, high_pass_frequency=None, low_pass_frequency=None,\n left_t=None, left_n=None, right_t=None, right_n=None, attenuation=\n None, beta=None, phase=None, M=None, I=None, L=None):\n \"\"\"sinc takes 12 parameters:\n\n high_pass_frequency in Hz,\n low_pass_frequency in Hz,\n left_t,\n left_n,\n right_t,\n right_n,\n attenuation in dB,\n beta,\n phase,\n M,\n I,\n L\n\n This effect creates a steep bandpass or\n bandreject filter. You may specify as few as the first two\n parameters. 
Setting the high-pass parameter to a lower value\n than the low-pass creates a band-reject filter.\n \"\"\"\n self.command.append('sinc')\n if not mutually_exclusive(attenuation, beta):\n raise ValueError(\n 'Attenuation (-a) and beta (-b) are mutually exclusive arguments.'\n )\n if attenuation is not None and beta is None:\n self.command.append('-a')\n self.command.append(str(attenuation))\n elif attenuation is None and beta is not None:\n self.command.append('-b')\n self.command.append(str(beta))\n if not mutually_exclusive(phase, M, I, L):\n raise ValueError(\n 'Phase (-p), -M, L, and -I are mutually exclusive arguments.')\n if phase is not None:\n self.command.append('-p')\n self.command.append(str(phase))\n elif M is not None:\n self.command.append('-M')\n elif I is not None:\n self.command.append('-I')\n elif L is not None:\n self.command.append('-L')\n if not mutually_exclusive(left_t, left_t):\n raise ValueError(\n 'Transition bands options (-t or -n) are mutually exclusive.')\n if left_t is not None:\n self.command.append('-t')\n self.command.append(str(left_t))\n if left_n is not None:\n self.command.append('-n')\n self.command.append(str(left_n))\n if high_pass_frequency is not None and low_pass_frequency is None:\n self.command.append(str(high_pass_frequency))\n elif high_pass_frequency is not None and low_pass_frequency is not None:\n self.command.append(str(high_pass_frequency) + '-' + str(\n low_pass_frequency))\n elif high_pass_frequency is None and low_pass_frequency is not None:\n self.command.append(str(low_pass_frequency))\n if not mutually_exclusive(right_t, right_t):\n raise ValueError(\n 'Transition bands options (-t or -n) are mutually exclusive.')\n if right_t is not None:\n self.command.append('-t')\n self.command.append(str(right_t))\n if right_n is not None:\n self.command.append('-n')\n self.command.append(str(right_n))\n return self\n\n def bend(self, bends, frame_rate=None, over_sample=None):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('bend')\n if frame_rate is not None and isinstance(frame_rate, int):\n self.command.append('-f %s' % frame_rate)\n if over_sample is not None and isinstance(over_sample, int):\n self.command.append('-o %s' % over_sample)\n for bend in bends:\n self.command.append(','.join(bend))\n return self\n\n def chorus(self, gain_in, gain_out, decays):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('chorus')\n self.command.append(gain_in)\n self.command.append(gain_out)\n for decay in decays:\n modulation = decay.pop()\n numerical = decay\n self.command.append(' '.join(map(str, numerical)) + ' -' +\n modulation)\n return self\n\n def delay(self, gain_in=0.8, gain_out=0.5, delays=None, decays=None,\n parallel=False):\n \"\"\"delay takes 4 parameters: input gain (max 1), output gain\n and then two lists, delays and decays.\n\n Each list is a pair of comma seperated values within\n parenthesis.\n \"\"\"\n if delays is None:\n delays = list((1000, 1800))\n if decays is None:\n decays = list((0.3, 0.25))\n self.command.append('echo' + ('s' if parallel else ''))\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.extend(list(sum(zip(delays, decays), ())))\n return self\n\n def echo(self, **kwargs):\n \"\"\"TODO Add docstring.\"\"\"\n return self.delay(**kwargs)\n <mask token>\n\n def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5, shape\n ='sine', phase=25, interp='linear'):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n\n def gain(self, db):\n \"\"\"gain takes one 
paramter: gain in dB.\"\"\"\n self.command.append('gain')\n self.command.append(db)\n return self\n\n def mcompand(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n <mask token>\n\n def oops(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n <mask token>\n\n def phaser(self, gain_in=0.9, gain_out=0.8, delay=1, decay=0.25, speed=\n 2, triangular=False):\n \"\"\"phaser takes 6 parameters: input gain (max 1.0), output gain (max\n 1.0), delay, decay, speed and LFO shape=trianglar (which must be set to\n True or False)\"\"\"\n self.command.append('phaser')\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.append(delay)\n self.command.append(decay)\n self.command.append(speed)\n if triangular:\n self.command.append('-t')\n else:\n self.command.append('-s')\n return self\n\n def pitch(self, shift, use_tree=False, segment=82, search=14.68, overlap=12\n ):\n \"\"\"pitch takes 4 parameters: user_tree (True or False), segment, search\n and overlap.\"\"\"\n self.command.append('pitch')\n if use_tree:\n self.command.append('-q')\n self.command.append(shift)\n self.command.append(segment)\n self.command.append(search)\n self.command.append(overlap)\n return self\n\n def loop(self):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('repeat')\n self.command.append('-')\n return self\n\n def reverb(self, reverberance=50, hf_damping=50, room_scale=100,\n stereo_depth=100, pre_delay=20, wet_gain=0, wet_only=False):\n \"\"\"reverb takes 7 parameters: reverberance, high-freqnency damping,\n room scale, stereo depth, pre-delay, wet gain and wet only (True or\n False)\"\"\"\n self.command.append('reverb')\n if wet_only:\n self.command.append('-w')\n self.command.append(reverberance)\n self.command.append(hf_damping)\n self.command.append(room_scale)\n self.command.append(stereo_depth)\n self.command.append(pre_delay)\n self.command.append(wet_gain)\n return self\n\n def reverse(self):\n \"\"\"reverse takes no parameters.\n\n It plays the input sound backwards.\n \"\"\"\n self.command.append('reverse')\n return self\n\n def speed(self, factor, use_semitones=False):\n \"\"\"speed takes 2 parameters: factor and use-semitones (True or False).\n\n When use-semitones = False, a factor of 2 doubles the speed and raises the pitch an octave. 
The same result is achieved with factor = 1200 and use semitones = True.\n \"\"\"\n self.command.append('speed')\n self.command.append(factor if not use_semitones else str(factor) + 'c')\n return self\n\n def synth(self):\n raise NotImplementedError()\n\n def tempo(self, factor, use_tree=False, opt_flag=None, segment=82,\n search=14.68, overlap=12):\n \"\"\"tempo takes 6 parameters: factor, use tree (True or False), option\n flag, segment, search and overlap).\n\n This effect changes the duration of the sound without modifying\n pitch.\n \"\"\"\n self.command.append('tempo')\n if use_tree:\n self.command.append('-q')\n if opt_flag in ('l', 'm', 's'):\n self.command.append('-%s' % opt_flag)\n self.command.append(factor)\n self.command.append(segment)\n self.command.append(search)\n self.command.append(overlap)\n return self\n\n def tremolo(self, freq, depth=40):\n \"\"\"tremolo takes two parameters: frequency and depth (max 100)\"\"\"\n self.command.append('tremolo')\n self.command.append(freq)\n self.command.append(depth)\n return self\n\n def trim(self, positions):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('trim')\n for position in positions:\n self.command.append(position)\n return self\n\n def upsample(self, factor):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('upsample')\n self.command.append(factor)\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def __call__(self, src, dst=np.ndarray, sample_in=44100, sample_out=\n None, encoding_out=None, channels_out=None, allow_clipping=True):\n stdin = None\n if isinstance(src, str):\n infile = FilePathInput(src)\n stdin = src\n elif isinstance(src, np.ndarray):\n infile = NumpyArrayInput(src, sample_in)\n stdin = src\n elif isinstance(src, BufferedReader):\n infile = FileBufferInput(src)\n stdin = infile.data\n else:\n infile = None\n if encoding_out is None and dst is np.ndarray:\n if isinstance(stdin, np.ndarray):\n encoding_out = stdin.dtype.type\n elif isinstance(stdin, str):\n encoding_out = np.float32\n if channels_out is None:\n if infile is None:\n channels_out = 1\n else:\n channels_out = infile.channels\n if sample_out is None:\n sample_out = sample_in\n if isinstance(dst, str):\n outfile = FilePathOutput(dst, sample_out, channels_out)\n elif dst is np.ndarray:\n outfile = NumpyArrayOutput(encoding_out, sample_out, channels_out)\n elif isinstance(dst, BufferedWriter):\n outfile = FileBufferOutput(dst, sample_out, channels_out)\n else:\n outfile = None\n cmd = shlex.split(' '.join(['sox', '-N', '-V1' if allow_clipping else\n '-V2', infile.cmd_prefix if infile is not None else '-d', \n outfile.cmd_suffix if outfile is not None else '-d'] + list(map\n (str, self.command))), posix=False)\n logger.debug('Running command : %s' % cmd)\n if isinstance(stdin, np.ndarray):\n stdout, stderr = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE\n ).communicate(stdin.tobytes(order='F'))\n else:\n stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()\n if stderr:\n raise RuntimeError(stderr.decode())\n elif stdout:\n outsound = np.frombuffer(stdout, dtype=encoding_out)\n if channels_out > 1:\n outsound = outsound.reshape((channels_out, int(len(outsound\n ) / channels_out)), order='F')\n if isinstance(outfile, FileBufferOutput):\n outfile.write(outsound)\n return outsound\n",
"step-4": "<mask token>\n\n\nclass AudioEffectsChain:\n\n def __init__(self):\n self.command = []\n\n def equalizer(self, frequency, q=1.0, db=-3.0):\n \"\"\"equalizer takes three parameters: filter center frequency in Hz, \"q\"\n or band-width (default=1.0), and a signed number for gain or\n attenuation in dB.\n\n Beware of clipping when using positive gain.\n \"\"\"\n self.command.append('equalizer')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n self.command.append(db)\n return self\n <mask token>\n <mask token>\n\n def lowshelf(self, gain=-20.0, frequency=100, slope=0.5):\n \"\"\"lowshelf takes 3 parameters: a signed number for gain or attenuation\n in dB, filter frequency in Hz and slope (default=0.5, maximum=1.0).\n\n Beware of Clipping when using positive gain.\n \"\"\"\n self.command.append('bass')\n self.command.append(gain)\n self.command.append(frequency)\n self.command.append(slope)\n return self\n\n def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):\n \"\"\"highshelf takes 3 parameters: a signed number for gain or\n attenuation in dB, filter frequency in Hz and slope (default=0.5).\n\n Beware of clipping when using positive gain.\n \"\"\"\n self.command.append('treble')\n self.command.append(gain)\n self.command.append(frequency)\n self.command.append(slope)\n return self\n\n def highpass(self, frequency, q=0.707):\n \"\"\"highpass takes 2 parameters: filter frequency in Hz below which\n frequencies will be attenuated and q (default=0.707).\n\n Beware of clipping when using high q values.\n \"\"\"\n self.command.append('highpass')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n\n def lowpass(self, frequency, q=0.707):\n \"\"\"lowpass takes 2 parameters: filter frequency in Hz above which\n frequencies will be attenuated and q (default=0.707).\n\n Beware of clipping when using high q values.\n \"\"\"\n self.command.append('lowpass')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n\n def limiter(self, gain=3.0):\n \"\"\"limiter takes one parameter: gain in dB.\n\n Beware of adding too much gain, as it can cause audible\n distortion. See the compand effect for a more capable limiter.\n \"\"\"\n self.command.append('gain')\n self.command.append('-l')\n self.command.append(gain)\n return self\n\n def normalize(self):\n \"\"\"normalize has no parameters.\n\n It boosts level so that the loudest part of your file reaches\n maximum, without clipping.\n \"\"\"\n self.command.append('gain')\n self.command.append('-n')\n return self\n\n def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20,\n db_from=-20.0, db_to=-20.0):\n \"\"\"compand takes 6 parameters:\n\n attack (seconds), decay (seconds), soft_knee (ex. 6 results\n in 6:1 compression ratio), threshold (a negative value\n in dB), the level below which the signal will NOT be companded\n (a negative value in dB), the level above which the signal will\n NOT be companded (a negative value in dB). 
This effect\n manipulates dynamic range of the input file.\n \"\"\"\n self.command.append('compand')\n self.command.append(str(attack) + ',' + str(decay))\n self.command.append(str(soft_knee) + ':' + str(threshold) + ',' +\n str(db_from) + ',' + str(db_to))\n return self\n\n def sinc(self, high_pass_frequency=None, low_pass_frequency=None,\n left_t=None, left_n=None, right_t=None, right_n=None, attenuation=\n None, beta=None, phase=None, M=None, I=None, L=None):\n \"\"\"sinc takes 12 parameters:\n\n high_pass_frequency in Hz,\n low_pass_frequency in Hz,\n left_t,\n left_n,\n right_t,\n right_n,\n attenuation in dB,\n beta,\n phase,\n M,\n I,\n L\n\n This effect creates a steep bandpass or\n bandreject filter. You may specify as few as the first two\n parameters. Setting the high-pass parameter to a lower value\n than the low-pass creates a band-reject filter.\n \"\"\"\n self.command.append('sinc')\n if not mutually_exclusive(attenuation, beta):\n raise ValueError(\n 'Attenuation (-a) and beta (-b) are mutually exclusive arguments.'\n )\n if attenuation is not None and beta is None:\n self.command.append('-a')\n self.command.append(str(attenuation))\n elif attenuation is None and beta is not None:\n self.command.append('-b')\n self.command.append(str(beta))\n if not mutually_exclusive(phase, M, I, L):\n raise ValueError(\n 'Phase (-p), -M, L, and -I are mutually exclusive arguments.')\n if phase is not None:\n self.command.append('-p')\n self.command.append(str(phase))\n elif M is not None:\n self.command.append('-M')\n elif I is not None:\n self.command.append('-I')\n elif L is not None:\n self.command.append('-L')\n if not mutually_exclusive(left_t, left_t):\n raise ValueError(\n 'Transition bands options (-t or -n) are mutually exclusive.')\n if left_t is not None:\n self.command.append('-t')\n self.command.append(str(left_t))\n if left_n is not None:\n self.command.append('-n')\n self.command.append(str(left_n))\n if high_pass_frequency is not None and low_pass_frequency is None:\n self.command.append(str(high_pass_frequency))\n elif high_pass_frequency is not None and low_pass_frequency is not None:\n self.command.append(str(high_pass_frequency) + '-' + str(\n low_pass_frequency))\n elif high_pass_frequency is None and low_pass_frequency is not None:\n self.command.append(str(low_pass_frequency))\n if not mutually_exclusive(right_t, right_t):\n raise ValueError(\n 'Transition bands options (-t or -n) are mutually exclusive.')\n if right_t is not None:\n self.command.append('-t')\n self.command.append(str(right_t))\n if right_n is not None:\n self.command.append('-n')\n self.command.append(str(right_n))\n return self\n\n def bend(self, bends, frame_rate=None, over_sample=None):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('bend')\n if frame_rate is not None and isinstance(frame_rate, int):\n self.command.append('-f %s' % frame_rate)\n if over_sample is not None and isinstance(over_sample, int):\n self.command.append('-o %s' % over_sample)\n for bend in bends:\n self.command.append(','.join(bend))\n return self\n\n def chorus(self, gain_in, gain_out, decays):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('chorus')\n self.command.append(gain_in)\n self.command.append(gain_out)\n for decay in decays:\n modulation = decay.pop()\n numerical = decay\n self.command.append(' '.join(map(str, numerical)) + ' -' +\n modulation)\n return self\n\n def delay(self, gain_in=0.8, gain_out=0.5, delays=None, decays=None,\n parallel=False):\n \"\"\"delay takes 4 parameters: input gain 
(max 1), output gain\n and then two lists, delays and decays.\n\n Each list is a pair of comma seperated values within\n parenthesis.\n \"\"\"\n if delays is None:\n delays = list((1000, 1800))\n if decays is None:\n decays = list((0.3, 0.25))\n self.command.append('echo' + ('s' if parallel else ''))\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.extend(list(sum(zip(delays, decays), ())))\n return self\n\n def echo(self, **kwargs):\n \"\"\"TODO Add docstring.\"\"\"\n return self.delay(**kwargs)\n <mask token>\n\n def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5, shape\n ='sine', phase=25, interp='linear'):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n\n def gain(self, db):\n \"\"\"gain takes one paramter: gain in dB.\"\"\"\n self.command.append('gain')\n self.command.append(db)\n return self\n\n def mcompand(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n <mask token>\n\n def oops(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n <mask token>\n\n def phaser(self, gain_in=0.9, gain_out=0.8, delay=1, decay=0.25, speed=\n 2, triangular=False):\n \"\"\"phaser takes 6 parameters: input gain (max 1.0), output gain (max\n 1.0), delay, decay, speed and LFO shape=trianglar (which must be set to\n True or False)\"\"\"\n self.command.append('phaser')\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.append(delay)\n self.command.append(decay)\n self.command.append(speed)\n if triangular:\n self.command.append('-t')\n else:\n self.command.append('-s')\n return self\n\n def pitch(self, shift, use_tree=False, segment=82, search=14.68, overlap=12\n ):\n \"\"\"pitch takes 4 parameters: user_tree (True or False), segment, search\n and overlap.\"\"\"\n self.command.append('pitch')\n if use_tree:\n self.command.append('-q')\n self.command.append(shift)\n self.command.append(segment)\n self.command.append(search)\n self.command.append(overlap)\n return self\n\n def loop(self):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('repeat')\n self.command.append('-')\n return self\n\n def reverb(self, reverberance=50, hf_damping=50, room_scale=100,\n stereo_depth=100, pre_delay=20, wet_gain=0, wet_only=False):\n \"\"\"reverb takes 7 parameters: reverberance, high-freqnency damping,\n room scale, stereo depth, pre-delay, wet gain and wet only (True or\n False)\"\"\"\n self.command.append('reverb')\n if wet_only:\n self.command.append('-w')\n self.command.append(reverberance)\n self.command.append(hf_damping)\n self.command.append(room_scale)\n self.command.append(stereo_depth)\n self.command.append(pre_delay)\n self.command.append(wet_gain)\n return self\n\n def reverse(self):\n \"\"\"reverse takes no parameters.\n\n It plays the input sound backwards.\n \"\"\"\n self.command.append('reverse')\n return self\n\n def speed(self, factor, use_semitones=False):\n \"\"\"speed takes 2 parameters: factor and use-semitones (True or False).\n\n When use-semitones = False, a factor of 2 doubles the speed and raises the pitch an octave. 
The same result is achieved with factor = 1200 and use semitones = True.\n \"\"\"\n self.command.append('speed')\n self.command.append(factor if not use_semitones else str(factor) + 'c')\n return self\n\n def synth(self):\n raise NotImplementedError()\n\n def tempo(self, factor, use_tree=False, opt_flag=None, segment=82,\n search=14.68, overlap=12):\n \"\"\"tempo takes 6 parameters: factor, use tree (True or False), option\n flag, segment, search and overlap).\n\n This effect changes the duration of the sound without modifying\n pitch.\n \"\"\"\n self.command.append('tempo')\n if use_tree:\n self.command.append('-q')\n if opt_flag in ('l', 'm', 's'):\n self.command.append('-%s' % opt_flag)\n self.command.append(factor)\n self.command.append(segment)\n self.command.append(search)\n self.command.append(overlap)\n return self\n\n def tremolo(self, freq, depth=40):\n \"\"\"tremolo takes two parameters: frequency and depth (max 100)\"\"\"\n self.command.append('tremolo')\n self.command.append(freq)\n self.command.append(depth)\n return self\n\n def trim(self, positions):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('trim')\n for position in positions:\n self.command.append(position)\n return self\n\n def upsample(self, factor):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('upsample')\n self.command.append(factor)\n return self\n <mask token>\n <mask token>\n <mask token>\n\n def __call__(self, src, dst=np.ndarray, sample_in=44100, sample_out=\n None, encoding_out=None, channels_out=None, allow_clipping=True):\n stdin = None\n if isinstance(src, str):\n infile = FilePathInput(src)\n stdin = src\n elif isinstance(src, np.ndarray):\n infile = NumpyArrayInput(src, sample_in)\n stdin = src\n elif isinstance(src, BufferedReader):\n infile = FileBufferInput(src)\n stdin = infile.data\n else:\n infile = None\n if encoding_out is None and dst is np.ndarray:\n if isinstance(stdin, np.ndarray):\n encoding_out = stdin.dtype.type\n elif isinstance(stdin, str):\n encoding_out = np.float32\n if channels_out is None:\n if infile is None:\n channels_out = 1\n else:\n channels_out = infile.channels\n if sample_out is None:\n sample_out = sample_in\n if isinstance(dst, str):\n outfile = FilePathOutput(dst, sample_out, channels_out)\n elif dst is np.ndarray:\n outfile = NumpyArrayOutput(encoding_out, sample_out, channels_out)\n elif isinstance(dst, BufferedWriter):\n outfile = FileBufferOutput(dst, sample_out, channels_out)\n else:\n outfile = None\n cmd = shlex.split(' '.join(['sox', '-N', '-V1' if allow_clipping else\n '-V2', infile.cmd_prefix if infile is not None else '-d', \n outfile.cmd_suffix if outfile is not None else '-d'] + list(map\n (str, self.command))), posix=False)\n logger.debug('Running command : %s' % cmd)\n if isinstance(stdin, np.ndarray):\n stdout, stderr = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE\n ).communicate(stdin.tobytes(order='F'))\n else:\n stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()\n if stderr:\n raise RuntimeError(stderr.decode())\n elif stdout:\n outsound = np.frombuffer(stdout, dtype=encoding_out)\n if channels_out > 1:\n outsound = outsound.reshape((channels_out, int(len(outsound\n ) / channels_out)), order='F')\n if isinstance(outfile, FileBufferOutput):\n outfile.write(outsound)\n return outsound\n",
"step-5": "\"\"\"A lightweight Python wrapper of SoX's effects.\"\"\"\nimport shlex\nfrom io import BufferedReader, BufferedWriter\nfrom subprocess import PIPE, Popen\n\nimport numpy as np\n\nfrom .sndfiles import (\n FileBufferInput,\n FileBufferOutput,\n FilePathInput,\n FilePathOutput,\n NumpyArrayInput,\n NumpyArrayOutput,\n logger,\n)\n\n\ndef mutually_exclusive(*args):\n return sum(arg is not None for arg in args) < 2\n\n\nclass AudioEffectsChain:\n def __init__(self):\n self.command = []\n\n def equalizer(self, frequency, q=1.0, db=-3.0):\n \"\"\"equalizer takes three parameters: filter center frequency in Hz, \"q\"\n or band-width (default=1.0), and a signed number for gain or\n attenuation in dB.\n\n Beware of clipping when using positive gain.\n \"\"\"\n self.command.append('equalizer')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n self.command.append(db)\n return self\n\n def bandpass(self, frequency, q=1.0):\n \"\"\"bandpass takes 2 parameters: filter center frequency in Hz and \"q\"\n or band-width (default=1.0).\n\n It gradually removes frequencies outside the band specified.\n \"\"\"\n self.command.append('bandpass')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n\n def bandreject(self, frequency, q=1.0):\n \"\"\"bandreject takes 2 parameters: filter center frequency in Hz and \"q\"\n or band-width (default=1.0).\n\n It gradually removes frequencies within the band specified.\n \"\"\"\n self.command.append('bandreject')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n\n def lowshelf(self, gain=-20.0, frequency=100, slope=0.5):\n \"\"\"lowshelf takes 3 parameters: a signed number for gain or attenuation\n in dB, filter frequency in Hz and slope (default=0.5, maximum=1.0).\n\n Beware of Clipping when using positive gain.\n \"\"\"\n self.command.append('bass')\n self.command.append(gain)\n self.command.append(frequency)\n self.command.append(slope)\n return self\n\n def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):\n \"\"\"highshelf takes 3 parameters: a signed number for gain or\n attenuation in dB, filter frequency in Hz and slope (default=0.5).\n\n Beware of clipping when using positive gain.\n \"\"\"\n self.command.append('treble')\n self.command.append(gain)\n self.command.append(frequency)\n self.command.append(slope)\n return self\n\n def highpass(self, frequency, q=0.707):\n \"\"\"highpass takes 2 parameters: filter frequency in Hz below which\n frequencies will be attenuated and q (default=0.707).\n\n Beware of clipping when using high q values.\n \"\"\"\n self.command.append('highpass')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n\n def lowpass(self, frequency, q=0.707):\n \"\"\"lowpass takes 2 parameters: filter frequency in Hz above which\n frequencies will be attenuated and q (default=0.707).\n\n Beware of clipping when using high q values.\n \"\"\"\n self.command.append('lowpass')\n self.command.append(frequency)\n self.command.append(str(q) + 'q')\n return self\n\n def limiter(self, gain=3.0):\n \"\"\"limiter takes one parameter: gain in dB.\n\n Beware of adding too much gain, as it can cause audible\n distortion. 
See the compand effect for a more capable limiter.\n \"\"\"\n self.command.append('gain')\n self.command.append('-l')\n self.command.append(gain)\n return self\n\n def normalize(self):\n \"\"\"normalize has no parameters.\n\n It boosts level so that the loudest part of your file reaches\n maximum, without clipping.\n \"\"\"\n self.command.append('gain')\n self.command.append('-n')\n return self\n\n def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20, db_from=-20.0, db_to=-20.0):\n \"\"\"compand takes 6 parameters:\n\n attack (seconds), decay (seconds), soft_knee (ex. 6 results\n in 6:1 compression ratio), threshold (a negative value\n in dB), the level below which the signal will NOT be companded\n (a negative value in dB), the level above which the signal will\n NOT be companded (a negative value in dB). This effect\n manipulates dynamic range of the input file.\n \"\"\"\n self.command.append('compand')\n self.command.append(str(attack) + ',' + str(decay))\n self.command.append(str(soft_knee) + ':' + str(threshold) + ',' + str(db_from) + ',' + str(db_to))\n return self\n\n def sinc(self,\n high_pass_frequency=None,\n low_pass_frequency=None,\n left_t=None,\n left_n=None,\n right_t=None,\n right_n=None,\n attenuation=None,\n beta=None,\n phase=None,\n M=None,\n I=None,\n L=None):\n \"\"\"sinc takes 12 parameters:\n\n high_pass_frequency in Hz,\n low_pass_frequency in Hz,\n left_t,\n left_n,\n right_t,\n right_n,\n attenuation in dB,\n beta,\n phase,\n M,\n I,\n L\n\n This effect creates a steep bandpass or\n bandreject filter. You may specify as few as the first two\n parameters. Setting the high-pass parameter to a lower value\n than the low-pass creates a band-reject filter.\n \"\"\"\n self.command.append(\"sinc\")\n if not mutually_exclusive(attenuation, beta):\n raise ValueError(\"Attenuation (-a) and beta (-b) are mutually exclusive arguments.\")\n if attenuation is not None and beta is None:\n self.command.append('-a')\n self.command.append(str(attenuation))\n elif attenuation is None and beta is not None:\n self.command.append('-b')\n self.command.append(str(beta))\n\n if not mutually_exclusive(phase, M, I, L):\n raise ValueError(\"Phase (-p), -M, L, and -I are mutually exclusive arguments.\")\n if phase is not None:\n self.command.append('-p')\n self.command.append(str(phase))\n elif M is not None:\n self.command.append('-M')\n elif I is not None:\n self.command.append('-I')\n elif L is not None:\n self.command.append('-L')\n\n if not mutually_exclusive(left_t, left_t):\n raise ValueError(\"Transition bands options (-t or -n) are mutually exclusive.\")\n if left_t is not None:\n self.command.append('-t')\n self.command.append(str(left_t))\n if left_n is not None:\n self.command.append('-n')\n self.command.append(str(left_n))\n\n if high_pass_frequency is not None and low_pass_frequency is None:\n self.command.append(str(high_pass_frequency))\n elif high_pass_frequency is not None and low_pass_frequency is not None:\n self.command.append(str(high_pass_frequency) + '-' + str(low_pass_frequency))\n elif high_pass_frequency is None and low_pass_frequency is not None:\n self.command.append(str(low_pass_frequency))\n\n if not mutually_exclusive(right_t, right_t):\n raise ValueError(\"Transition bands options (-t or -n) are mutually exclusive.\")\n if right_t is not None:\n self.command.append('-t')\n self.command.append(str(right_t))\n if right_n is not None:\n self.command.append('-n')\n self.command.append(str(right_n))\n return self\n\n def bend(self, bends, 
frame_rate=None, over_sample=None):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append(\"bend\")\n if frame_rate is not None and isinstance(frame_rate, int):\n self.command.append('-f %s' % frame_rate)\n if over_sample is not None and isinstance(over_sample, int):\n self.command.append('-o %s' % over_sample)\n for bend in bends:\n self.command.append(','.join(bend))\n return self\n\n def chorus(self, gain_in, gain_out, decays):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append(\"chorus\")\n self.command.append(gain_in)\n self.command.append(gain_out)\n for decay in decays:\n modulation = decay.pop()\n numerical = decay\n self.command.append(' '.join(map(str, numerical)) + ' -' + modulation)\n return self\n\n def delay(self,\n gain_in=0.8,\n gain_out=0.5,\n delays=None,\n decays=None,\n parallel=False):\n \"\"\"delay takes 4 parameters: input gain (max 1), output gain\n and then two lists, delays and decays.\n\n Each list is a pair of comma seperated values within\n parenthesis.\n \"\"\"\n if delays is None:\n delays = list((1000, 1800))\n if decays is None:\n decays = list((0.3, 0.25))\n self.command.append('echo' + ('s' if parallel else ''))\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.extend(list(sum(zip(delays, decays), ())))\n return self\n\n def echo(self, **kwargs):\n \"\"\"TODO Add docstring.\"\"\"\n return self.delay(**kwargs)\n\n def fade(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n\n def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5, shape='sine', phase=25, interp='linear'):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n\n def gain(self, db):\n \"\"\"gain takes one paramter: gain in dB.\"\"\"\n self.command.append('gain')\n self.command.append(db)\n return self\n\n def mcompand(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n\n def noise_reduction(self, amount=0.5):\n \"\"\"TODO Add docstring.\"\"\"\n # TODO Run sox once with noiseprof on silent portions to generate a noise profile.\n raise NotImplementedError()\n\n def oops(self):\n \"\"\"TODO Add docstring.\"\"\"\n raise NotImplementedError()\n\n def overdrive(self, gain=20, colour=20):\n \"\"\"overdrive takes 2 parameters: gain in dB and colour which effects\n the character of the distortion effet.\n\n Both have a default value of 20. 
TODO - changing color does not seem to have an audible effect\n \"\"\"\n self.command.append('overdrive')\n self.command.append(gain)\n self.command.append(colour)\n return self\n\n def phaser(self,\n gain_in=0.9,\n gain_out=0.8,\n delay=1,\n decay=0.25,\n speed=2,\n triangular=False):\n \"\"\"phaser takes 6 parameters: input gain (max 1.0), output gain (max\n 1.0), delay, decay, speed and LFO shape=trianglar (which must be set to\n True or False)\"\"\"\n self.command.append(\"phaser\")\n self.command.append(gain_in)\n self.command.append(gain_out)\n self.command.append(delay)\n self.command.append(decay)\n self.command.append(speed)\n if triangular:\n self.command.append('-t')\n else:\n self.command.append('-s')\n return self\n\n def pitch(self, shift,\n use_tree=False,\n segment=82,\n search=14.68,\n overlap=12):\n \"\"\"pitch takes 4 parameters: user_tree (True or False), segment, search\n and overlap.\"\"\"\n self.command.append(\"pitch\")\n if use_tree:\n self.command.append('-q')\n self.command.append(shift)\n self.command.append(segment)\n self.command.append(search)\n self.command.append(overlap)\n return self\n\n def loop(self):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append('repeat')\n self.command.append('-')\n return self\n\n def reverb(self,\n reverberance=50,\n hf_damping=50,\n room_scale=100,\n stereo_depth=100,\n pre_delay=20,\n wet_gain=0,\n wet_only=False):\n \"\"\"reverb takes 7 parameters: reverberance, high-freqnency damping,\n room scale, stereo depth, pre-delay, wet gain and wet only (True or\n False)\"\"\"\n self.command.append('reverb')\n if wet_only:\n self.command.append('-w')\n self.command.append(reverberance)\n self.command.append(hf_damping)\n self.command.append(room_scale)\n self.command.append(stereo_depth)\n self.command.append(pre_delay)\n self.command.append(wet_gain)\n return self\n\n def reverse(self):\n \"\"\"reverse takes no parameters.\n\n It plays the input sound backwards.\n \"\"\"\n self.command.append(\"reverse\")\n return self\n\n def speed(self, factor, use_semitones=False):\n \"\"\"speed takes 2 parameters: factor and use-semitones (True or False).\n\n When use-semitones = False, a factor of 2 doubles the speed and raises the pitch an octave. 
The same result is achieved with factor = 1200 and use semitones = True.\n \"\"\"\n self.command.append(\"speed\")\n self.command.append(factor if not use_semitones else str(factor) + \"c\")\n return self\n\n def synth(self):\n raise NotImplementedError()\n\n def tempo(self,\n factor,\n use_tree=False,\n opt_flag=None,\n segment=82,\n search=14.68,\n overlap=12):\n \"\"\"tempo takes 6 parameters: factor, use tree (True or False), option\n flag, segment, search and overlap).\n\n This effect changes the duration of the sound without modifying\n pitch.\n \"\"\"\n self.command.append(\"tempo\")\n\n if use_tree:\n self.command.append('-q')\n if opt_flag in ('l', 'm', 's'):\n self.command.append('-%s' % opt_flag)\n self.command.append(factor)\n self.command.append(segment)\n self.command.append(search)\n self.command.append(overlap)\n return self\n\n def tremolo(self, freq, depth=40):\n \"\"\"tremolo takes two parameters: frequency and depth (max 100)\"\"\"\n self.command.append(\"tremolo\")\n self.command.append(freq)\n self.command.append(depth)\n return self\n\n def trim(self, positions):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append(\"trim\")\n for position in positions:\n # TODO: check if the position means something\n self.command.append(position)\n return self\n\n def upsample(self, factor):\n \"\"\"TODO Add docstring.\"\"\"\n self.command.append(\"upsample\")\n self.command.append(factor)\n return self\n\n def vad(self):\n raise NotImplementedError()\n\n def vol(self, gain, type=\"amplitude\", limiter_gain=None):\n \"\"\"vol takes three parameters: gain, gain-type (amplitude, power or dB)\n and limiter gain.\"\"\"\n self.command.append(\"vol\")\n if type in [\"amplitude\", \"power\", \"dB\"]:\n self.command.append(type)\n else:\n raise ValueError(\"Type has to be dB, amplitude or power.\")\n if limiter_gain is not None:\n self.command.append(str(limiter_gain))\n print(self.command)\n return self\n\n def custom(self, command):\n \"\"\"Run arbitrary SoX effect commands.\n\n Examples:\n custom('echo 0.8 0.9 1000 0.3') for an echo effect.\n\n References:\n - https://linux.die.net/man/1/soxexam\n - http://sox.sourceforge.net/sox.html\n - http://tldp.org/LDP/LG/issue73/chung.html\n - http://dsl.org/cookbook/cookbook_29.html\n \"\"\"\n self.command.append(command)\n return self\n\n def __call__(\n self,\n src,\n dst=np.ndarray,\n sample_in=44100, # used only for arrays\n sample_out=None,\n encoding_out=None,\n channels_out=None,\n allow_clipping=True):\n\n # depending on the input, using the right object to set up the input data arguments\n stdin = None\n if isinstance(src, str):\n infile = FilePathInput(src)\n stdin = src\n elif isinstance(src, np.ndarray):\n infile = NumpyArrayInput(src, sample_in)\n stdin = src\n elif isinstance(src, BufferedReader):\n infile = FileBufferInput(src)\n stdin = infile.data # retrieving the data from the file reader (np array)\n else:\n infile = None\n\n # finding out which output encoding to use in case the output is ndarray\n if encoding_out is None and dst is np.ndarray:\n if isinstance(stdin, np.ndarray):\n encoding_out = stdin.dtype.type\n elif isinstance(stdin, str):\n encoding_out = np.float32\n # finding out which channel count to use (defaults to the input file's channel count)\n if channels_out is None:\n if infile is None:\n channels_out = 1\n else:\n channels_out = infile.channels\n if sample_out is None: # if the output samplerate isn't specified, default to input's\n sample_out = sample_in\n\n # same as for the input data, but for the 
destination\n if isinstance(dst, str):\n outfile = FilePathOutput(dst, sample_out, channels_out)\n elif dst is np.ndarray:\n outfile = NumpyArrayOutput(encoding_out, sample_out, channels_out)\n elif isinstance(dst, BufferedWriter):\n outfile = FileBufferOutput(dst, sample_out, channels_out)\n else:\n outfile = None\n\n cmd = shlex.split(\n ' '.join([\n 'sox',\n '-N',\n '-V1' if allow_clipping else '-V2',\n infile.cmd_prefix if infile is not None else '-d',\n outfile.cmd_suffix if outfile is not None else '-d',\n ] + list(map(str, self.command))),\n posix=False,\n )\n\n logger.debug(\"Running command : %s\" % cmd)\n if isinstance(stdin, np.ndarray):\n stdout, stderr = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate(stdin.tobytes(order='F'))\n else:\n stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()\n\n if stderr:\n raise RuntimeError(stderr.decode())\n elif stdout:\n outsound = np.frombuffer(stdout, dtype=encoding_out)\n if channels_out > 1:\n outsound = outsound.reshape((channels_out, int(len(outsound) / channels_out)), order='F')\n if isinstance(outfile, FileBufferOutput):\n outfile.write(outsound)\n return outsound\n",
"step-ids": [
22,
27,
29,
31,
42
]
}
|
[
22,
27,
29,
31,
42
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def test(d_iter):
from cqlengine import columns
from cqlengine.models import Model
from cqlengine.query import ModelQuerySet
from cqlengine import connection
from cqlengine.management import sync_table
from urllib2 import urlopen, Request
from pyspark.sql import SQLContext
import json
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
import operator
from sets import Set
CASSANDRA_KEYSPACE = 'playground'
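    # Cassandra models: table3_timeline holds per-link hourly comment counts,
    # table3_comments holds raw comments, table3_links holds link metadata and the current top comment.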
class table3_timeline(Model):
link_id = columns.Text(primary_key=True)
counts = columns.Integer()
time = columns.Integer(primary_key=True, partition_key=False)
class table3_comments(Model):
link_id = columns.Text()
author = columns.Text()
body = columns.Text()
created_utc = columns.Text()
parent_id = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
name = columns.Text(primary_key=True)
score = columns.Integer(index=True)
class table3_links(Model):
link_id = columns.Text(primary_key=True)
title = columns.Text()
permalink = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
selftext = columns.Text()
created = columns.Integer()
score = columns.Integer()
url = columns.Text()
top_comment = columns.Text()
top_score = columns.Integer()
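    # Set up the cqlengine connection for ORM writes and a raw driver session for ad-hoc CQL reads;
    # sync_table creates the tables if they do not already exist.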
connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)
cluster = Cluster(['54.193.123.92'])
session = cluster.connect(CASSANDRA_KEYSPACE)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
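    # For each incoming comment: persist it, update the parent link's top comment/score,
    # and bump the hourly activity timeline.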
for d in d_iter:
table3_comments.create(**d)
input = {}
createdtime = 0
obj = table3_links.objects(link_id=d['link_id'])
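        # Look up the link's current top score and creation time to see whether it is already stored.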
cql = (
"SELECT top_score, created FROM table3_links WHERE link_id='" +
d['link_id'] + "'")
stmt = session.execute(cql)
current = []
for repo in stmt:
current.append(repo)
if len(current) > 0:
createdtime = current[0][1]
if int(current[0][0]) < int(d['score']):
obj.update(top_comment=d['name'])
obj.update(top_score=d['score'])
else:
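            # Link not stored yet: fetch its metadata from the Reddit API,
            # create the link row and start its timeline at bucket 0.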
source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'
request = Request(source)
response = urlopen(request)
data = json.loads(response.read())
input['title'] = data['data']['children'][0]['data']['title']
input['permalink'] = data['data']['children'][0]['data'][
'permalink']
input['subreddit'] = data['data']['children'][0]['data'][
'subreddit']
input['selftext'] = data['data']['children'][0]['data']['selftext']
input['subreddit_id'] = data['data']['children'][0]['data'][
'subreddit_id']
input['created'] = int(data['data']['children'][0]['data'][
'created'])
createdtime = input['created']
input['url'] = data['data']['children'][0]['data']['url']
input['score'] = data['data']['children'][0]['data']['score']
table3_links.create(link_id=d['link_id'], title=input['title'],
permalink=input['permalink'], subreddit=input['subreddit'],
selftext=input['selftext'], subreddit_id=input[
'subreddit_id'], created=input['created'], url=input['url'],
score=input['score'], top_comment=d['name'], top_score=d[
'score'])
table3_timeline.create(link_id=d['link_id'], time=0, counts=0)
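        # Bucket the comment by whole hours elapsed since the link was created.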
timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)
cql2 = "SELECT counts FROM table3_timeline WHERE link_id='" + d[
'link_id'] + "' AND time=" + str(timegap)
stmt = session.execute(cql2)
count_tmp = []
for rep in stmt:
count_tmp.append(rep)
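        # Increment the existing bucket count, or create the bucket on first sight.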
if len(count_tmp) > 0:
            timeslot = table3_timeline.objects(link_id=d['link_id'], time=timegap)
timeslot.update(counts=count_tmp[0][0] + 1)
else:
            table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def test(d_iter):
from cqlengine import columns
from cqlengine.models import Model
from cqlengine.query import ModelQuerySet
from cqlengine import connection
from cqlengine.management import sync_table
from urllib2 import urlopen, Request
from pyspark.sql import SQLContext
import json
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
import operator
from sets import Set
CASSANDRA_KEYSPACE = 'playground'
class table3_timeline(Model):
link_id = columns.Text(primary_key=True)
counts = columns.Integer()
time = columns.Integer(primary_key=True, partition_key=False)
class table3_comments(Model):
link_id = columns.Text()
author = columns.Text()
body = columns.Text()
created_utc = columns.Text()
parent_id = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
name = columns.Text(primary_key=True)
score = columns.Integer(index=True)
class table3_links(Model):
link_id = columns.Text(primary_key=True)
title = columns.Text()
permalink = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
selftext = columns.Text()
created = columns.Integer()
score = columns.Integer()
url = columns.Text()
top_comment = columns.Text()
top_score = columns.Integer()
connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)
cluster = Cluster(['54.193.123.92'])
session = cluster.connect(CASSANDRA_KEYSPACE)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
for d in d_iter:
table3_comments.create(**d)
input = {}
createdtime = 0
obj = table3_links.objects(link_id=d['link_id'])
cql = (
"SELECT top_score, created FROM table3_links WHERE link_id='" +
d['link_id'] + "'")
stmt = session.execute(cql)
current = []
for repo in stmt:
current.append(repo)
if len(current) > 0:
createdtime = current[0][1]
if int(current[0][0]) < int(d['score']):
obj.update(top_comment=d['name'])
obj.update(top_score=d['score'])
else:
source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'
request = Request(source)
response = urlopen(request)
data = json.loads(response.read())
input['title'] = data['data']['children'][0]['data']['title']
input['permalink'] = data['data']['children'][0]['data'][
'permalink']
input['subreddit'] = data['data']['children'][0]['data'][
'subreddit']
input['selftext'] = data['data']['children'][0]['data']['selftext']
input['subreddit_id'] = data['data']['children'][0]['data'][
'subreddit_id']
input['created'] = int(data['data']['children'][0]['data'][
'created'])
createdtime = input['created']
input['url'] = data['data']['children'][0]['data']['url']
input['score'] = data['data']['children'][0]['data']['score']
table3_links.create(link_id=d['link_id'], title=input['title'],
permalink=input['permalink'], subreddit=input['subreddit'],
selftext=input['selftext'], subreddit_id=input[
'subreddit_id'], created=input['created'], url=input['url'],
score=input['score'], top_comment=d['name'], top_score=d[
'score'])
table3_timeline.create(link_id=d['link_id'], time=0, counts=0)
timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)
cql2 = "SELECT counts FROM table3_timeline WHERE link_id='" + d[
'link_id'] + "' AND time=" + str(timegap)
stmt = session.execute(cql2)
count_tmp = []
for rep in stmt:
count_tmp.append(rep)
if len(count_tmp) > 0:
            timeslot = table3_timeline.objects(link_id=d['link_id'], time=timegap)
timeslot.update(counts=count_tmp[0][0] + 1)
else:
            table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
<|reserved_special_token_0|>
test([])
rdd.foreachPartition(test)
<|reserved_special_token_1|>
def test(d_iter):
from cqlengine import columns
from cqlengine.models import Model
from cqlengine.query import ModelQuerySet
from cqlengine import connection
from cqlengine.management import sync_table
from urllib2 import urlopen, Request
from pyspark.sql import SQLContext
import json
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
import operator
from sets import Set
CASSANDRA_KEYSPACE = 'playground'
class table3_timeline(Model):
link_id = columns.Text(primary_key=True)
counts = columns.Integer()
time = columns.Integer(primary_key=True, partition_key=False)
class table3_comments(Model):
link_id = columns.Text()
author = columns.Text()
body = columns.Text()
created_utc = columns.Text()
parent_id = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
name = columns.Text(primary_key=True)
score = columns.Integer(index=True)
class table3_links(Model):
link_id = columns.Text(primary_key=True)
title = columns.Text()
permalink = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
selftext = columns.Text()
created = columns.Integer()
score = columns.Integer()
url = columns.Text()
top_comment = columns.Text()
top_score = columns.Integer()
connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)
cluster = Cluster(['54.193.123.92'])
session = cluster.connect(CASSANDRA_KEYSPACE)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
for d in d_iter:
table3_comments.create(**d)
input = {}
createdtime = 0
obj = table3_links.objects(link_id=d['link_id'])
cql = (
"SELECT top_score, created FROM table3_links WHERE link_id='" +
d['link_id'] + "'")
stmt = session.execute(cql)
current = []
for repo in stmt:
current.append(repo)
if len(current) > 0:
createdtime = current[0][1]
if int(current[0][0]) < int(d['score']):
obj.update(top_comment=d['name'])
obj.update(top_score=d['score'])
else:
source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'
request = Request(source)
response = urlopen(request)
data = json.loads(response.read())
input['title'] = data['data']['children'][0]['data']['title']
input['permalink'] = data['data']['children'][0]['data'][
'permalink']
input['subreddit'] = data['data']['children'][0]['data'][
'subreddit']
input['selftext'] = data['data']['children'][0]['data']['selftext']
input['subreddit_id'] = data['data']['children'][0]['data'][
'subreddit_id']
input['created'] = int(data['data']['children'][0]['data'][
'created'])
createdtime = input['created']
input['url'] = data['data']['children'][0]['data']['url']
input['score'] = data['data']['children'][0]['data']['score']
table3_links.create(link_id=d['link_id'], title=input['title'],
permalink=input['permalink'], subreddit=input['subreddit'],
selftext=input['selftext'], subreddit_id=input[
'subreddit_id'], created=input['created'], url=input['url'],
score=input['score'], top_comment=d['name'], top_score=d[
'score'])
table3_timeline.create(link_id=d['link_id'], time=0, counts=0)
timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)
cql2 = "SELECT counts FROM table3_timeline WHERE link_id='" + d[
'link_id'] + "' AND time=" + str(timegap)
stmt = session.execute(cql2)
count_tmp = []
for rep in stmt:
count_tmp.append(rep)
if len(count_tmp) > 0:
            timeslot = table3_timeline.objects(link_id=d['link_id'], time=timegap)
timeslot.update(counts=count_tmp[0][0] + 1)
else:
            table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
df = sqlContext.read.json('s3n://yy-data/testJSON.json')
rdd = df.map(lambda x: {'link_id': x.link_id, 'author': x.author,
    'body': x.body, 'created_utc': x.created_utc, 'parent_id': x.parent_id,
    'subreddit': x.subreddit, 'subreddit_id': x.subreddit_id,
    'name': x.name, 'score': x.score})
test([])
rdd.foreachPartition(test)
<|reserved_special_token_1|>
def test(d_iter):
from cqlengine import columns
from cqlengine.models import Model
from cqlengine.query import ModelQuerySet
from cqlengine import connection
from cqlengine.management import sync_table
from urllib2 import urlopen, Request
from pyspark.sql import SQLContext
import json
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
import operator
from sets import Set
CASSANDRA_KEYSPACE = "playground"
class table3_timeline(Model):
link_id = columns.Text(primary_key=True)
counts = columns.Integer()
time = columns.Integer(primary_key=True, partition_key=False)
class table3_comments(Model):
link_id = columns.Text()
author = columns.Text()
body = columns.Text()
created_utc = columns.Text()
parent_id = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
name = columns.Text(primary_key=True)
score = columns.Integer(index = True)
class table3_links(Model):
link_id = columns.Text(primary_key=True)
title = columns.Text()
permalink = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
selftext = columns.Text()
created = columns.Integer()
score = columns.Integer()
url = columns.Text()
top_comment = columns.Text()
top_score = columns.Integer()
connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)
cluster = Cluster(['54.193.123.92'])
session = cluster.connect(CASSANDRA_KEYSPACE)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
for d in d_iter:
table3_comments.create(**d)
input = {}
createdtime = 0
obj = table3_links.objects(link_id=d['link_id'])
cql = "SELECT top_score, created FROM table3_links WHERE link_id='"+d['link_id']+"'"
stmt = session.execute(cql)
current = []
for repo in stmt:
current.append(repo)
if len(current) > 0:
createdtime = current[0][1]
if int(current[0][0]) < int(d['score']):
obj.update(top_comment = d['name'])
obj.update(top_score = d['score'])
else:
source = "http://www.reddit.com/by_id/"+d['link_id']+"/.json"
request = Request(source)
response = urlopen(request)
data = json.loads(response.read())
input['title'] = data['data']['children'][0]['data']['title']
input['permalink'] = data['data']['children'][0]['data']['permalink']
input['subreddit'] = data['data']['children'][0]['data']['subreddit']
input['selftext'] = data['data']['children'][0]['data']['selftext']
input['subreddit_id'] = data['data']['children'][0]['data']['subreddit_id']
input['created'] = int(data['data']['children'][0]['data']['created'])
createdtime = input['created']
input['url'] = data['data']['children'][0]['data']['url']
input['score'] = data['data']['children'][0]['data']['score']
table3_links.create( link_id = d['link_id'],
title = input['title'],
permalink = input['permalink'],
subreddit = input['subreddit'],
selftext = input['selftext'],
subreddit_id = input['subreddit_id'],
created = input['created'],
url = input['url'],
score = input['score'],
top_comment = d['name'],
top_score = d['score'])
table3_timeline.create(link_id=d['link_id'], time=0, counts=0)
timegap = int(abs(int(d['created_utc']) - createdtime)/3600) # one hour
cql2 = "SELECT counts FROM table3_timeline WHERE link_id='"+d['link_id']+"' AND time=" + str(timegap)
stmt = session.execute(cql2)
count_tmp = []
for rep in stmt:
count_tmp.append(rep)
if len(count_tmp) > 0:
timeslot = table3_timeline.objects(link_id=d['link_id'], time=timegap)
timeslot.update(counts=(count_tmp[0][0]+1))
else:
table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
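# Load the sample comment dump from S3 and project the fields that test() writes to Cassandra.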
df = sqlContext.read.json("s3n://yy-data/testJSON.json")
# alternative input: s3n://reddit-comments/2007/RC_2007-10
rdd = df.map(lambda x: {"link_id": x.link_id,
"author": x.author,
"body": x.body,
"created_utc": x.created_utc,
"parent_id": x.parent_id,
"subreddit": x.subreddit,
"subreddit_id": x.subreddit_id,
"name": x.name,
"score": x.score})
test([])
rdd.foreachPartition(test)
|
flexible
|
{
"blob_id": "11f29508d52e856f4751a5dc8911a1f1c9832374",
"index": 944,
"step-1": "<mask token>\n",
"step-2": "def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n 
timegap)\n timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\n<mask token>\n",
"step-3": "def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n 
timegap)\n timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\n<mask token>\ntest([])\nrdd.foreachPartition(test)\n",
"step-4": "def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n 
timegap)\n timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\ndf = sqlContext.read.json('s3n://yy-data/testJSON.json')\nrdd = df.map(lambda x: {'link_id': x.link_id, 'author': x.author, 'body': x\n .body, 'created_utc': x.created_utc, 'parent_id': x.parent_id,\n 'subreddit': x.subreddit, 'subreddit_id': x.subreddit_id, 'name': x.\n name, 'score': x.score})\ntest([])\nrdd.foreachPartition(test)\n",
"step-5": "def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n\n CASSANDRA_KEYSPACE = \"playground\"\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index = True)\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = \"SELECT top_score, created FROM table3_links WHERE link_id='\"+d['link_id']+\"'\"\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment = d['name'])\n obj.update(top_score = d['score'])\n else:\n source = \"http://www.reddit.com/by_id/\"+d['link_id']+\"/.json\"\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data']['permalink']\n input['subreddit'] = data['data']['children'][0]['data']['subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data']['subreddit_id'] \n input['created'] = int(data['data']['children'][0]['data']['created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create( link_id = d['link_id'],\n title = input['title'],\n permalink = input['permalink'],\n subreddit = input['subreddit'],\n selftext = input['selftext'],\n subreddit_id = input['subreddit_id'],\n created = input['created'],\n url = input['url'],\n score = input['score'],\n top_comment = d['name'],\n top_score = d['score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime)/3600) # one hour\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\"+d['link_id']+\"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], 
time=timegap)\n timeslot.update(counts=(count_tmp[0][0]+1))\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\ndf = sqlContext.read.json(\"s3n://yy-data/testJSON.json\")\n# s3n://reddit-comments/2007/RC_2007-10\nrdd = df.map(lambda x: {\"link_id\": x.link_id, \n \"author\": x.author,\n \"body\": x.body,\n \"created_utc\": x.created_utc,\n \"parent_id\": x.parent_id,\n \"subreddit\": x.subreddit,\n \"subreddit_id\": x.subreddit_id,\n \"name\": x.name,\n \"score\": x.score})\ntest([])\nrdd.foreachPartition(test)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# llcrnrlat,llcrnrlon,urcrnrlat,urcrnrlon
# are the lat/lon values of the lower left and upper right corners
# of the map.
# resolution = 'c' means use crude resolution coastlines.
m = Basemap(projection='cea',llcrnrlat=-90,urcrnrlat=90,\
llcrnrlon=-180,urcrnrlon=180,resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,91.,30.))
m.drawmeridians(np.arange(-180.,181.,60.))
m.drawmapboundary(fill_color='aqua')
plt.title("Cylindrical Equal-Area Projection")
plt.show()
|
normal
|
{
"blob_id": "f5f9a1c7dcb7345e24f50db54649a1970fc37185",
"index": 1262,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nm.drawcoastlines()\nm.fillcontinents(color='coral', lake_color='aqua')\nm.drawparallels(np.arange(-90.0, 91.0, 30.0))\nm.drawmeridians(np.arange(-180.0, 181.0, 60.0))\nm.drawmapboundary(fill_color='aqua')\nplt.title('Cylindrical Equal-Area Projection')\nplt.show()\n",
"step-3": "<mask token>\nm = Basemap(projection='cea', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=-180,\n urcrnrlon=180, resolution='c')\nm.drawcoastlines()\nm.fillcontinents(color='coral', lake_color='aqua')\nm.drawparallels(np.arange(-90.0, 91.0, 30.0))\nm.drawmeridians(np.arange(-180.0, 181.0, 60.0))\nm.drawmapboundary(fill_color='aqua')\nplt.title('Cylindrical Equal-Area Projection')\nplt.show()\n",
"step-4": "from mpl_toolkits.basemap import Basemap\nimport numpy as np\nimport matplotlib.pyplot as plt\nm = Basemap(projection='cea', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=-180,\n urcrnrlon=180, resolution='c')\nm.drawcoastlines()\nm.fillcontinents(color='coral', lake_color='aqua')\nm.drawparallels(np.arange(-90.0, 91.0, 30.0))\nm.drawmeridians(np.arange(-180.0, 181.0, 60.0))\nm.drawmapboundary(fill_color='aqua')\nplt.title('Cylindrical Equal-Area Projection')\nplt.show()\n",
"step-5": "from mpl_toolkits.basemap import Basemap\nimport numpy as np\nimport matplotlib.pyplot as plt\n# llcrnrlat,llcrnrlon,urcrnrlat,urcrnrlon\n# are the lat/lon values of the lower left and upper right corners\n# of the map.\n# resolution = 'c' means use crude resolution coastlines.\nm = Basemap(projection='cea',llcrnrlat=-90,urcrnrlat=90,\\\n llcrnrlon=-180,urcrnrlon=180,resolution='c')\nm.drawcoastlines()\nm.fillcontinents(color='coral',lake_color='aqua')\n# draw parallels and meridians.\nm.drawparallels(np.arange(-90.,91.,30.))\nm.drawmeridians(np.arange(-180.,181.,60.))\nm.drawmapboundary(fill_color='aqua')\nplt.title(\"Cylindrical Equal-Area Projection\")\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class FileStorage:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def all(self):
"""
Return:
the dictionary __objects
"""
return self.__objects
<|reserved_special_token_0|>
def save(self):
"""
serializes __objects to JSON file
"""
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FileStorage:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def all(self):
"""
Return:
the dictionary __objects
"""
return self.__objects
def new(self, obj):
"""
sets in objects with key classname.id
Args:
object
"""
self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj
def save(self):
"""
serializes __objects to JSON file
"""
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
def reload(self):
"""
deserializes the JSON file
"""
try:
with open(self.__file_path, mode='r', encoding='utf-8') as f:
newobjects = json.load(f)
for k, v in newobjects.items():
reloadedobj = eval('{}(**v)'.format(v['__class__']))
self.__objects[k] = reloadedobj
except IOError:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FileStorage:
"""FileStorage class"""
__file_path = 'file.json'
__objects = {}
def all(self):
"""
Return:
the dictionary __objects
"""
return self.__objects
def new(self, obj):
"""
sets in objects with key classname.id
Args:
object
"""
self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj
def save(self):
"""
serializes __objects to JSON file
"""
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
def reload(self):
"""
deserializes the JSON file
"""
try:
with open(self.__file_path, mode='r', encoding='utf-8') as f:
newobjects = json.load(f)
for k, v in newobjects.items():
reloadedobj = eval('{}(**v)'.format(v['__class__']))
self.__objects[k] = reloadedobj
except IOError:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import json
from models.base_model import BaseModel
import models
from models.user import User
from models.place import Place
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.review import Review
class FileStorage:
"""FileStorage class"""
__file_path = 'file.json'
__objects = {}
def all(self):
"""
Return:
the dictionary __objects
"""
return self.__objects
def new(self, obj):
"""
sets in objects with key classname.id
Args:
object
"""
self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj
def save(self):
"""
serializes __objects to JSON file
"""
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
def reload(self):
"""
deserializes the JSON file
"""
try:
with open(self.__file_path, mode='r', encoding='utf-8') as f:
newobjects = json.load(f)
for k, v in newobjects.items():
reloadedobj = eval('{}(**v)'.format(v['__class__']))
self.__objects[k] = reloadedobj
except IOError:
pass
<|reserved_special_token_1|>
#!/usr/bin/python3
''' FileStorage module '''
import json
from models.base_model import BaseModel
import models
from models.user import User
from models.place import Place
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.review import Review
class FileStorage:
'''FileStorage class'''
__file_path = 'file.json'
__objects = {}
def all(self):
'''
Return:
the dictionary __objects
'''
return self.__objects
def new(self, obj):
'''
sets in objects with key classname.id
Args:
object
'''
self.__objects["{}.{}".format(obj.__class__.__name__, obj.id)] = obj
def save(self):
'''
serializes __objects to JSON file
'''
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
def reload(self):
'''
deserializes the JSON file
'''
try:
with open(self.__file_path, mode='r', encoding='utf-8') as f:
newobjects = json.load(f)
for k, v in newobjects.items():
reloadedobj = eval('{}(**v)'.format(v['__class__']))
self.__objects[k] = reloadedobj
except IOError:
pass
|
flexible
|
{
"blob_id": "5461d50d3c06bc4276044cc77bd804f6e7c16b3b",
"index": 1278,
"step-1": "<mask token>\n\n\nclass FileStorage:\n <mask token>\n <mask token>\n <mask token>\n\n def all(self):\n \"\"\"\n Return:\n the dictionary __objects\n \"\"\"\n return self.__objects\n <mask token>\n\n def save(self):\n \"\"\"\n serializes __objects to JSON file\n \"\"\"\n newdict = {}\n with open(self.__file_path, mode='w+', encoding='utf-8') as f:\n for k, v in self.__objects.items():\n newdict[k] = v.to_dict()\n json.dump(newdict, f)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FileStorage:\n <mask token>\n <mask token>\n <mask token>\n\n def all(self):\n \"\"\"\n Return:\n the dictionary __objects\n \"\"\"\n return self.__objects\n\n def new(self, obj):\n \"\"\"\n sets in objects with key classname.id\n\n Args:\n object\n \"\"\"\n self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj\n\n def save(self):\n \"\"\"\n serializes __objects to JSON file\n \"\"\"\n newdict = {}\n with open(self.__file_path, mode='w+', encoding='utf-8') as f:\n for k, v in self.__objects.items():\n newdict[k] = v.to_dict()\n json.dump(newdict, f)\n\n def reload(self):\n \"\"\"\n deserializes the JSON file\n \"\"\"\n try:\n with open(self.__file_path, mode='r', encoding='utf-8') as f:\n newobjects = json.load(f)\n for k, v in newobjects.items():\n reloadedobj = eval('{}(**v)'.format(v['__class__']))\n self.__objects[k] = reloadedobj\n except IOError:\n pass\n",
"step-3": "<mask token>\n\n\nclass FileStorage:\n \"\"\"FileStorage class\"\"\"\n __file_path = 'file.json'\n __objects = {}\n\n def all(self):\n \"\"\"\n Return:\n the dictionary __objects\n \"\"\"\n return self.__objects\n\n def new(self, obj):\n \"\"\"\n sets in objects with key classname.id\n\n Args:\n object\n \"\"\"\n self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj\n\n def save(self):\n \"\"\"\n serializes __objects to JSON file\n \"\"\"\n newdict = {}\n with open(self.__file_path, mode='w+', encoding='utf-8') as f:\n for k, v in self.__objects.items():\n newdict[k] = v.to_dict()\n json.dump(newdict, f)\n\n def reload(self):\n \"\"\"\n deserializes the JSON file\n \"\"\"\n try:\n with open(self.__file_path, mode='r', encoding='utf-8') as f:\n newobjects = json.load(f)\n for k, v in newobjects.items():\n reloadedobj = eval('{}(**v)'.format(v['__class__']))\n self.__objects[k] = reloadedobj\n except IOError:\n pass\n",
"step-4": "<mask token>\nimport json\nfrom models.base_model import BaseModel\nimport models\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass FileStorage:\n \"\"\"FileStorage class\"\"\"\n __file_path = 'file.json'\n __objects = {}\n\n def all(self):\n \"\"\"\n Return:\n the dictionary __objects\n \"\"\"\n return self.__objects\n\n def new(self, obj):\n \"\"\"\n sets in objects with key classname.id\n\n Args:\n object\n \"\"\"\n self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj\n\n def save(self):\n \"\"\"\n serializes __objects to JSON file\n \"\"\"\n newdict = {}\n with open(self.__file_path, mode='w+', encoding='utf-8') as f:\n for k, v in self.__objects.items():\n newdict[k] = v.to_dict()\n json.dump(newdict, f)\n\n def reload(self):\n \"\"\"\n deserializes the JSON file\n \"\"\"\n try:\n with open(self.__file_path, mode='r', encoding='utf-8') as f:\n newobjects = json.load(f)\n for k, v in newobjects.items():\n reloadedobj = eval('{}(**v)'.format(v['__class__']))\n self.__objects[k] = reloadedobj\n except IOError:\n pass\n",
"step-5": "#!/usr/bin/python3\n''' FileStorage module '''\nimport json\nfrom models.base_model import BaseModel\nimport models\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass FileStorage:\n '''FileStorage class'''\n\n __file_path = 'file.json'\n __objects = {}\n\n def all(self):\n '''\n Return:\n the dictionary __objects\n '''\n return self.__objects\n\n def new(self, obj):\n '''\n sets in objects with key classname.id\n\n Args:\n object\n '''\n self.__objects[\"{}.{}\".format(obj.__class__.__name__, obj.id)] = obj\n\n def save(self):\n '''\n serializes __objects to JSON file\n '''\n newdict = {}\n with open(self.__file_path, mode='w+', encoding='utf-8') as f:\n for k, v in self.__objects.items():\n newdict[k] = v.to_dict()\n json.dump(newdict, f)\n\n def reload(self):\n '''\n deserializes the JSON file\n '''\n try:\n with open(self.__file_path, mode='r', encoding='utf-8') as f:\n newobjects = json.load(f)\n for k, v in newobjects.items():\n reloadedobj = eval('{}(**v)'.format(v['__class__']))\n self.__objects[k] = reloadedobj\n\n except IOError:\n pass\n",
"step-ids": [
3,
5,
7,
8,
9
]
}
|
[
3,
5,
7,
8,
9
] |
import os
from xml.dom import minidom
import numpy as np
def get_branches_dir(root_dir):
branches_dir = []
folds = os.listdir(root_dir)
while folds:
branch_dir = root_dir + '/' + folds.pop()
branches_dir.append(branch_dir)
return branches_dir
def tolist(xml, detname):
try:
data = minidom.parse(xml)
except:
print('parse error')
ErrorFiles.append(xml)
return
detectors = data.documentElement
date = detectors.getElementsByTagName('date')[0].childNodes[0].data
time = detectors.getElementsByTagName('time')[0].childNodes[0].data
dets = detectors.getElementsByTagName('detector')
laneVolume = 0
laneOccupancy = 0
laneSpeed = 0
for det in dets:
try:
detectorID = det.getElementsByTagName('detector-Id')[0]
except IndexError:
continue
# print"\ndetector-Id: %s" % detectorID.childNodes[0].data
if detectorID.childNodes[0].data in detname:
lanes = det.getElementsByTagName('lane')
for lane in lanes:
# laneNumber = lane.getElementsByTagName('lane-Number')[0]
laneStatus = lane.getElementsByTagName('lane-Status')[0]
if laneStatus.childNodes[0].data == "OK":
try:
laneVolume += int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
laneOccupancy += int(lane.getElementsByTagName('lane-Occupancy')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
laneSpeed += int(lane.getElementsByTagName('lane-Speed')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
except IndexError:
break
else:
break
if laneVolume > 0:
for i in range(0, len(detname)):
if detectorID.childNodes[0].data == detname[i]:
c = i
detectorData[c][0].append(date)
detectorData[c][1].append(time)
detectorData[c][2].append(laneVolume)
detectorData[c][3].append(laneOccupancy/float(laneVolume))
detectorData[c][4].append(laneSpeed/float(laneVolume))
month_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'
os.chdir(month_dir) # change the current working directory to path.
day_dir = get_branches_dir(month_dir)
detNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D', 'MI044E250.8D', 'MI044E246.6D']
ErrorFiles = []
for dayFile in day_dir:
detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []]]
xmlFiles = get_branches_dir(dayFile)
for xml in xmlFiles:
if not os.path.isdir(xml):
print(xml)
tolist(xml, detNames)
for i in range(0, len(detNames)):
m = np.array(detectorData[i])
os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/'+detNames[i])
np.save(detectorData[0][0][0]+'.npy', m)
|
normal
|
{
"blob_id": "2b7bb02a25504e7481d3bc637ea09bcf9addb990",
"index": 7699,
"step-1": "<mask token>\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == 'OK':\n try:\n laneVolume += int(lane.getElementsByTagName(\n 'lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName(\n 'lane-Occupancy')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName(\n 'lane-Speed')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n except IndexError:\n break\n else:\n break\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy / float(laneVolume))\n detectorData[c][4].append(laneSpeed / float(laneVolume))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == 'OK':\n try:\n laneVolume += int(lane.getElementsByTagName(\n 'lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName(\n 'lane-Occupancy')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName(\n 'lane-Speed')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n except IndexError:\n break\n else:\n break\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy / float(laneVolume))\n detectorData[c][4].append(laneSpeed / float(laneVolume))\n\n\n<mask token>\nos.chdir(month_dir)\n<mask token>\nfor dayFile in day_dir:\n detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],\n [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [\n ], []]]\n xmlFiles = get_branches_dir(dayFile)\n for xml in xmlFiles:\n if not os.path.isdir(xml):\n print(xml)\n tolist(xml, detNames)\n for i in range(0, len(detNames)):\n m = np.array(detectorData[i])\n os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])\n np.save(detectorData[0][0][0] + '.npy', m)\n",
"step-3": "<mask token>\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == 'OK':\n try:\n laneVolume += int(lane.getElementsByTagName(\n 'lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName(\n 'lane-Occupancy')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName(\n 'lane-Speed')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n except IndexError:\n break\n else:\n break\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy / float(laneVolume))\n detectorData[c][4].append(laneSpeed / float(laneVolume))\n\n\nmonth_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'\nos.chdir(month_dir)\nday_dir = get_branches_dir(month_dir)\ndetNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D',\n 'MI044E250.8D', 'MI044E246.6D']\nErrorFiles = []\nfor dayFile in day_dir:\n detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],\n [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [\n ], []]]\n xmlFiles = get_branches_dir(dayFile)\n for xml in xmlFiles:\n if not os.path.isdir(xml):\n print(xml)\n tolist(xml, detNames)\n for i in range(0, len(detNames)):\n m = np.array(detectorData[i])\n os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])\n np.save(detectorData[0][0][0] + '.npy', m)\n",
"step-4": "import os\nfrom xml.dom import minidom\nimport numpy as np\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == 'OK':\n try:\n laneVolume += int(lane.getElementsByTagName(\n 'lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName(\n 'lane-Occupancy')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName(\n 'lane-Speed')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n except IndexError:\n break\n else:\n break\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy / float(laneVolume))\n detectorData[c][4].append(laneSpeed / float(laneVolume))\n\n\nmonth_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'\nos.chdir(month_dir)\nday_dir = get_branches_dir(month_dir)\ndetNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D',\n 'MI044E250.8D', 'MI044E246.6D']\nErrorFiles = []\nfor dayFile in day_dir:\n detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],\n [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [\n ], []]]\n xmlFiles = get_branches_dir(dayFile)\n for xml in xmlFiles:\n if not os.path.isdir(xml):\n print(xml)\n tolist(xml, detNames)\n for i in range(0, len(detNames)):\n m = np.array(detectorData[i])\n os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])\n np.save(detectorData[0][0][0] + '.npy', m)\n",
"step-5": "import os\nfrom xml.dom import minidom\nimport numpy as np\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n # print\"\\ndetector-Id: %s\" % detectorID.childNodes[0].data\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n # laneNumber = lane.getElementsByTagName('lane-Number')[0]\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == \"OK\":\n try:\n laneVolume += int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName('lane-Occupancy')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName('lane-Speed')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)\n except IndexError:\n break\n else:\n break\n\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy/float(laneVolume))\n detectorData[c][4].append(laneSpeed/float(laneVolume))\n\n\nmonth_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'\nos.chdir(month_dir) # change the current working directory to path.\nday_dir = get_branches_dir(month_dir)\ndetNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D', 'MI044E250.8D', 'MI044E246.6D']\nErrorFiles = []\nfor dayFile in day_dir:\n detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []]]\n xmlFiles = get_branches_dir(dayFile)\n for xml in xmlFiles:\n if not os.path.isdir(xml):\n print(xml)\n tolist(xml, detNames)\n\n for i in range(0, len(detNames)):\n m = np.array(detectorData[i])\n os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/'+detNames[i])\n np.save(detectorData[0][0][0]+'.npy', m)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
screen = pg.display.set_mode((640, 380))
<|reserved_special_token_1|>
import pygame as pg
screen = pg.display.set_mode((640, 380))
|
flexible
|
{
"blob_id": "c1374a048187807deac5d28dda4fbc7beeccf8f5",
"index": 5221,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nscreen = pg.display.set_mode((640, 380))\n",
"step-3": "import pygame as pg\nscreen = pg.display.set_mode((640, 380))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import requests
import sqlite3
url = 'http://dummy.restapiexample.com/api/v1/employees'
r = requests.get(url)
packages_json = r.json()
# Create the employee database if it does not exist
db = sqlite3.connect('employee.sqlite')
#create the table
db.execute("CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)")
#db.execute("INSERT INTO employee(id, employee_name, employee_salary, employee_age, profile_image) VALUES(1, 'Levi', 50000, 24, '')")
# Loop through each employee's information and insert it into the database
for employee in packages_json['data']:
db.execute("INSERT INTO employee VALUES (?, ?, ?, ?, ?)", [employee["id"], employee["employee_name"], employee["employee_salary"], employee["employee_age"],employee["profile_image"]])
db.commit()
db.close()
|
normal
|
{
"blob_id": "497203be99643e2bb0087977f292f4ed890f9ead",
"index": 7111,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.execute(\n 'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'\n )\nfor employee in packages_json['data']:\n db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[\n 'id'], employee['employee_name'], employee['employee_salary'],\n employee['employee_age'], employee['profile_image']])\n db.commit()\ndb.close()\n",
"step-3": "<mask token>\nurl = 'http://dummy.restapiexample.com/api/v1/employees'\nr = requests.get(url)\npackages_json = r.json()\ndb = sqlite3.connect('employee.sqlite')\ndb.execute(\n 'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'\n )\nfor employee in packages_json['data']:\n db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[\n 'id'], employee['employee_name'], employee['employee_salary'],\n employee['employee_age'], employee['profile_image']])\n db.commit()\ndb.close()\n",
"step-4": "import requests\nimport sqlite3\nurl = 'http://dummy.restapiexample.com/api/v1/employees'\nr = requests.get(url)\npackages_json = r.json()\ndb = sqlite3.connect('employee.sqlite')\ndb.execute(\n 'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'\n )\nfor employee in packages_json['data']:\n db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[\n 'id'], employee['employee_name'], employee['employee_salary'],\n employee['employee_age'], employee['profile_image']])\n db.commit()\ndb.close()\n",
"step-5": "import requests\r\nimport sqlite3\r\n\r\nurl = 'http://dummy.restapiexample.com/api/v1/employees'\r\n\r\nr = requests.get(url)\r\npackages_json = r.json()\r\n\r\n# Create the employee database if it does not exist\r\ndb = sqlite3.connect('employee.sqlite')\r\n#create the table\r\ndb.execute(\"CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)\")\r\n#db.execute(\"INSERT INTO employee(id, employee_name, employee_salary, employee_age, profile_image) VALUES(1, 'Levi', 50000, 24, '')\")\r\n\r\n# Loop through each employee information and insert into database\r\nfor employee in packages_json['data']:\r\n db.execute(\"INSERT INTO employee VALUES (?, ?, ?, ?, ?)\", [employee[\"id\"], employee[\"employee_name\"], employee[\"employee_salary\"], employee[\"employee_age\"],employee[\"profile_image\"]])\r\n db.commit()\r\ndb.close()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
ConstantsCommands.py
"""
TEST_HEAD = "\n >>>>>> " \
"\n >>>>>> Test in progress: {0}" \
"\n >>>>>>"
TEST_TAIL = ">>>>>> Test execution done, tearDown\n\r"
|
normal
|
{
"blob_id": "45f0a7a78184195a593061d863ff2114abe01a46",
"index": 6321,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nTEST_HEAD = \"\"\"\n >>>>>> \n >>>>>> Test in progress: {0}\n >>>>>>\"\"\"\nTEST_TAIL = '>>>>>> Test execution done, tearDown\\n\\r'\n",
"step-3": "\"\"\"\nConstantsCommands.py\n\"\"\"\n\nTEST_HEAD = \"\\n >>>>>> \" \\\n \"\\n >>>>>> Test in progress: {0}\" \\\n \"\\n >>>>>>\"\n\nTEST_TAIL = \">>>>>> Test execution done, tearDown\\n\\r\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# read in file of customs declaration responses
declarations_file = open('day6_declarations.txt', 'r')
lines = declarations_file.readlines()
# initialise variables
group_responses = [] # temporary container for all responses of each group member
count_any_member_has_response = 0 # count for part 1
count_all_members_have_response = 0 # count for part 2
# loop over file
for line in lines:
	# if we have a blank line (or are at the end of the file), we have reached the end of
	# a group's info, so save the declaration response info for the current group
# and reset group_responses list
if line == '\n' or line == lines[-1]:
# case where at end of file, want to save that last line
if line == lines[-1]:
			# remove the newline at the end of the line
line = line.strip()
group_responses.append(line)
#print(group_responses)
# PART 1
# for each group, count the number of questions to which ANYONE responded "yes"
# what is the sum of those counts?
# each group member has their responses as one element in group_responses
# so flatten this so each char of each group member now makes up one element
group_responses_flattened = [item for sublist in group_responses for item in sublist]
# there will be duplicates in the flattened array
# first part wants the total number of UNIQUE elements so convert to set
group_responses_set = set(group_responses_flattened)
#print(group_responses_set)
# count number of unique elements in the set and add this to
# the count_any_member_has_response var which keeps track of the total count
# for all groups
count_any_member_has_response += len(group_responses_set)
# PART 2
# for each group, count the number of questions to which EVERYONE answered "yes"
# what is the sum of those counts?
# easiest way is to look at first group member
# how many of the characters for the first group member
# appear for ALL the other group members
for char in group_responses[0]:
char_in_all_members = True
# see if char exists for all other group members - if not then set
# char_in_all_members to False
for item in group_responses:
if char not in item:
char_in_all_members = False
# if char appears for all members, add one to
# count_all_members_have_response var which keeps track of the total count
# for all groups
if char_in_all_members == True:
#print('char', char, 'exists for all members of this group')
count_all_members_have_response += 1
# finished processing this group so reset the temp var group_responses
# so it can be filled again for the next group
group_responses = []
else:
# we are still in the same group so continue adding
# group member responses to group_responses list
line = line.strip()
group_responses.append(line)
# print out final counts for parts 1 and 2
print('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =', count_any_member_has_response)
print('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =', count_all_members_have_response)
|
normal
|
{
"blob_id": "cb6ed6422a5591f1de0a947f75ad080f250e8443",
"index": 7718,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in lines:\n if line == '\\n' or line == lines[-1]:\n if line == lines[-1]:\n line = line.strip()\n group_responses.append(line)\n group_responses_flattened = [item for sublist in group_responses for\n item in sublist]\n group_responses_set = set(group_responses_flattened)\n count_any_member_has_response += len(group_responses_set)\n for char in group_responses[0]:\n char_in_all_members = True\n for item in group_responses:\n if char not in item:\n char_in_all_members = False\n if char_in_all_members == True:\n count_all_members_have_response += 1\n group_responses = []\n else:\n line = line.strip()\n group_responses.append(line)\nprint('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =',\n count_any_member_has_response)\nprint('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =',\n count_all_members_have_response)\n",
"step-3": "declarations_file = open('day6_declarations.txt', 'r')\nlines = declarations_file.readlines()\ngroup_responses = []\ncount_any_member_has_response = 0\ncount_all_members_have_response = 0\nfor line in lines:\n if line == '\\n' or line == lines[-1]:\n if line == lines[-1]:\n line = line.strip()\n group_responses.append(line)\n group_responses_flattened = [item for sublist in group_responses for\n item in sublist]\n group_responses_set = set(group_responses_flattened)\n count_any_member_has_response += len(group_responses_set)\n for char in group_responses[0]:\n char_in_all_members = True\n for item in group_responses:\n if char not in item:\n char_in_all_members = False\n if char_in_all_members == True:\n count_all_members_have_response += 1\n group_responses = []\n else:\n line = line.strip()\n group_responses.append(line)\nprint('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =',\n count_any_member_has_response)\nprint('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =',\n count_all_members_have_response)\n",
"step-4": "\n\n# read in file of customs declaration responses\ndeclarations_file = open('day6_declarations.txt', 'r')\nlines = declarations_file.readlines()\n\n# initialise variables\ngroup_responses = [] # temporary container for all responses of each group member\ncount_any_member_has_response = 0 # count for part 1\ncount_all_members_have_response = 0 # count for part 2\n\n\n# loop over file\nfor line in lines:\n\n\t# if have a blank line (or at end of file), means we have reached end of \n\t# an group's info, so save declaration response info for current group \n\t# and reset group_responses list\n\n\tif line == '\\n' or line == lines[-1]:\n\n\t\t# case where at end of file, want to save that last line\n\t\tif line == lines[-1]:\n\t\t\t\n\t\t\t# remove newlines at end of lines and split by whitespace\n\t\t\tline = line.strip()\n\t\t\tgroup_responses.append(line)\n\t\t\t\n\t\t#print(group_responses)\n\n\t\t\n\n\n\t\t# PART 1\n\t\t# for each group, count the number of questions to which ANYONE responded \"yes\" \n\t\t# what is the sum of those counts?\n\n\t\t# each group member has their responses as one element in group_responses\n\t\t# so flatten this so each char of each group member now makes up one element\n\t\tgroup_responses_flattened = [item for sublist in group_responses for item in sublist]\n\n\t\t# there will be duplicates in the flattened array\n\t\t# first part wants the total number of UNIQUE elements so convert to set\n\t\tgroup_responses_set = set(group_responses_flattened)\n\t\t#print(group_responses_set)\n\n\t\t# count number of unique elements in the set and add this to \n\t\t# the count_any_member_has_response var which keeps track of the total count\n\t\t# for all groups\n\t\tcount_any_member_has_response += len(group_responses_set)\n\n\n\n\n\n\n\t\t# PART 2\n\t\t# for each group, count the number of questions to which EVERYONE answered \"yes\"\n\t\t# what is the sum of those counts?\n\n\t\t# easiest way is to look at first group member\n\t\t# how many of the characters for the first group member\n\t\t# appear for ALL the other group members\n\t\tfor char in group_responses[0]:\n\n\t\t\tchar_in_all_members = True\n\n\t\t\t# see if char exists for all other group members - if not then set\n\t\t\t# char_in_all_members to False\n\t\t\tfor item in group_responses:\n\t\t\t\t\n\t\t\t\tif char not in item:\n\n\t\t\t\t\tchar_in_all_members = False\n\n\t\t\t# if char appears for all members, add one to\n\t\t\t# count_all_members_have_response var which keeps track of the total count\n\t\t\t# for all groups\n\t\t\tif char_in_all_members == True:\n\t\t\t\t#print('char', char, 'exists for all members of this group')\n\t\t\t\tcount_all_members_have_response += 1\n\n\t\t# finished processing this group so reset the temp var group_responses\n\t\t# so it can be filled again for the next group\n\t\tgroup_responses = []\n\t\n\t\n\n\n\n\n\telse:\n\n\t\t# we are still in the same group so continue adding \n\t\t# group member responses to group_responses list\n\t\tline = line.strip()\n\t\tgroup_responses.append(line)\n\t\t\n\n\n\n# print out final counts for parts 1 and 2\nprint('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =', count_any_member_has_response)\nprint('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =', count_all_members_have_response)\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ValidateWindowCtr(object):
def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans,
vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans,
index_truth, index_segmen):
self.fig = fig
self.im_trans, self.im_truth, self.im_segmen = (im_trans, im_truth,
im_segmen)
self.vol_trans, self.vol_truth, self.vol_segmen = (vol_trans,
vol_truth, vol_segmen)
self.ax_trans, self.ax_truth, self.ax_segmen = (ax_trans, ax_truth,
ax_segmen)
self.index_trans, self.index_truth, self.index_segmen = (
index_trans, index_truth, index_segmen)
self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: ' + str(self
.index_trans[-1]), color='b')
self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: ' + str(self.
index_truth[-1]), color='b')
self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: ' + str(
self.index_segmen[-1]), color='b')
self.scroll_trans = None
self.scroll_truth = None
self.scroll_segmen = None
self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)
self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)
<|reserved_special_token_0|>
def fig_leave_event(self, event):
self.fig.canvas.mpl_disconnect(self.scroll_trans)
self.fig.canvas.mpl_disconnect(self.scroll_truth)
self.fig.canvas.mpl_disconnect(self.scroll_segmen)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def segmen_subplot_scroll(self, event):
if event.button == 'down' and self.index_segmen[-1
] > -1 * self.vol_segmen.shape[0]:
self.index_segmen[-1] -= 1
if event.button == 'up' and self.index_segmen[-1
] < self.vol_segmen.shape[0] - 1:
self.index_segmen[-1] += 1
self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])
self.txt_segmen.set_text('Slice No: ' + str(self.index_segmen[-1]))
self.fig.canvas.draw_idle()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ValidateWindowCtr(object):
def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans,
vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans,
index_truth, index_segmen):
self.fig = fig
self.im_trans, self.im_truth, self.im_segmen = (im_trans, im_truth,
im_segmen)
self.vol_trans, self.vol_truth, self.vol_segmen = (vol_trans,
vol_truth, vol_segmen)
self.ax_trans, self.ax_truth, self.ax_segmen = (ax_trans, ax_truth,
ax_segmen)
self.index_trans, self.index_truth, self.index_segmen = (
index_trans, index_truth, index_segmen)
self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: ' + str(self
.index_trans[-1]), color='b')
self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: ' + str(self.
index_truth[-1]), color='b')
self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: ' + str(
self.index_segmen[-1]), color='b')
self.scroll_trans = None
self.scroll_truth = None
self.scroll_segmen = None
self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)
self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)
<|reserved_special_token_0|>
def fig_leave_event(self, event):
self.fig.canvas.mpl_disconnect(self.scroll_trans)
self.fig.canvas.mpl_disconnect(self.scroll_truth)
self.fig.canvas.mpl_disconnect(self.scroll_segmen)
<|reserved_special_token_0|>
def truth_subplot_scroll(self, event):
if event.button == 'down' and self.index_truth[-1
] > -1 * self.vol_truth.shape[0]:
self.index_truth[-1] -= 1
if event.button == 'up' and self.index_truth[-1
] < self.vol_truth.shape[0] - 1:
self.index_truth[-1] += 1
self.im_truth.set_data(self.vol_truth[self.index_truth[-1]])
self.txt_truth.set_text('Slice No: ' + str(self.index_truth[-1]))
self.fig.canvas.draw_idle()
def segmen_subplot_scroll(self, event):
if event.button == 'down' and self.index_segmen[-1
] > -1 * self.vol_segmen.shape[0]:
self.index_segmen[-1] -= 1
if event.button == 'up' and self.index_segmen[-1
] < self.vol_segmen.shape[0] - 1:
self.index_segmen[-1] += 1
self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])
self.txt_segmen.set_text('Slice No: ' + str(self.index_segmen[-1]))
self.fig.canvas.draw_idle()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ValidateWindowCtr(object):
def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans,
vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans,
index_truth, index_segmen):
self.fig = fig
self.im_trans, self.im_truth, self.im_segmen = (im_trans, im_truth,
im_segmen)
self.vol_trans, self.vol_truth, self.vol_segmen = (vol_trans,
vol_truth, vol_segmen)
self.ax_trans, self.ax_truth, self.ax_segmen = (ax_trans, ax_truth,
ax_segmen)
self.index_trans, self.index_truth, self.index_segmen = (
index_trans, index_truth, index_segmen)
self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: ' + str(self
.index_trans[-1]), color='b')
self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: ' + str(self.
index_truth[-1]), color='b')
self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: ' + str(
self.index_segmen[-1]), color='b')
self.scroll_trans = None
self.scroll_truth = None
self.scroll_segmen = None
self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)
self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)
def fig_enter_event(self, event):
if self.ax_trans.in_axes(event):
self.scroll_trans = self.fig.canvas.mpl_connect('scroll_event',
self.trans_subplot_scroll)
elif self.ax_truth.in_axes(event):
self.scroll_truth = self.fig.canvas.mpl_connect('scroll_event',
self.truth_subplot_scroll)
elif self.ax_segmen.in_axes(event):
self.scroll_segmen = self.fig.canvas.mpl_connect('scroll_event',
self.segmen_subplot_scroll)
def fig_leave_event(self, event):
self.fig.canvas.mpl_disconnect(self.scroll_trans)
self.fig.canvas.mpl_disconnect(self.scroll_truth)
self.fig.canvas.mpl_disconnect(self.scroll_segmen)
<|reserved_special_token_0|>
def truth_subplot_scroll(self, event):
if event.button == 'down' and self.index_truth[-1
] > -1 * self.vol_truth.shape[0]:
self.index_truth[-1] -= 1
if event.button == 'up' and self.index_truth[-1
] < self.vol_truth.shape[0] - 1:
self.index_truth[-1] += 1
self.im_truth.set_data(self.vol_truth[self.index_truth[-1]])
self.txt_truth.set_text('Slice No: ' + str(self.index_truth[-1]))
self.fig.canvas.draw_idle()
def segmen_subplot_scroll(self, event):
if event.button == 'down' and self.index_segmen[-1
] > -1 * self.vol_segmen.shape[0]:
self.index_segmen[-1] -= 1
if event.button == 'up' and self.index_segmen[-1
] < self.vol_segmen.shape[0] - 1:
self.index_segmen[-1] += 1
self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])
self.txt_segmen.set_text('Slice No: ' + str(self.index_segmen[-1]))
self.fig.canvas.draw_idle()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ValidateWindowCtr(object):
def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans,
vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans,
index_truth, index_segmen):
self.fig = fig
self.im_trans, self.im_truth, self.im_segmen = (im_trans, im_truth,
im_segmen)
self.vol_trans, self.vol_truth, self.vol_segmen = (vol_trans,
vol_truth, vol_segmen)
self.ax_trans, self.ax_truth, self.ax_segmen = (ax_trans, ax_truth,
ax_segmen)
self.index_trans, self.index_truth, self.index_segmen = (
index_trans, index_truth, index_segmen)
self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: ' + str(self
.index_trans[-1]), color='b')
self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: ' + str(self.
index_truth[-1]), color='b')
self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: ' + str(
self.index_segmen[-1]), color='b')
self.scroll_trans = None
self.scroll_truth = None
self.scroll_segmen = None
self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)
self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)
def fig_enter_event(self, event):
if self.ax_trans.in_axes(event):
self.scroll_trans = self.fig.canvas.mpl_connect('scroll_event',
self.trans_subplot_scroll)
elif self.ax_truth.in_axes(event):
self.scroll_truth = self.fig.canvas.mpl_connect('scroll_event',
self.truth_subplot_scroll)
elif self.ax_segmen.in_axes(event):
self.scroll_segmen = self.fig.canvas.mpl_connect('scroll_event',
self.segmen_subplot_scroll)
def fig_leave_event(self, event):
self.fig.canvas.mpl_disconnect(self.scroll_trans)
self.fig.canvas.mpl_disconnect(self.scroll_truth)
self.fig.canvas.mpl_disconnect(self.scroll_segmen)
def trans_subplot_scroll(self, event):
if event.button == 'down' and self.index_trans[-1
] > -1 * self.vol_trans.shape[0]:
self.index_trans[-1] -= 1
if event.button == 'up' and self.index_trans[-1
] < self.vol_trans.shape[0] - 1:
self.index_trans[-1] += 1
self.im_trans.set_data(self.vol_trans[self.index_trans[-1]])
self.txt_trans.set_text('Slice No: ' + str(self.index_trans[-1]))
self.fig.canvas.draw_idle()
def truth_subplot_scroll(self, event):
if event.button == 'down' and self.index_truth[-1
] > -1 * self.vol_truth.shape[0]:
self.index_truth[-1] -= 1
if event.button == 'up' and self.index_truth[-1
] < self.vol_truth.shape[0] - 1:
self.index_truth[-1] += 1
self.im_truth.set_data(self.vol_truth[self.index_truth[-1]])
self.txt_truth.set_text('Slice No: ' + str(self.index_truth[-1]))
self.fig.canvas.draw_idle()
def segmen_subplot_scroll(self, event):
if event.button == 'down' and self.index_segmen[-1
] > -1 * self.vol_segmen.shape[0]:
self.index_segmen[-1] -= 1
if event.button == 'up' and self.index_segmen[-1
] < self.vol_segmen.shape[0] - 1:
self.index_segmen[-1] += 1
self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])
self.txt_segmen.set_text('Slice No: ' + str(self.index_segmen[-1]))
self.fig.canvas.draw_idle()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 12:47:30 2019
Title: MP4-Medical Image Processing
@author: MP4 Team
"""
# Validate window controller
class ValidateWindowCtr(object):
# Initialization
def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans, vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans, index_truth, index_segmen):
self.fig = fig
self.im_trans, self.im_truth, self.im_segmen = im_trans, im_truth, im_segmen
self.vol_trans, self.vol_truth, self.vol_segmen = vol_trans, vol_truth, vol_segmen
self.ax_trans, self.ax_truth, self.ax_segmen = ax_trans, ax_truth, ax_segmen
self.index_trans, self.index_truth, self.index_segmen = index_trans, index_truth, index_segmen
self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: '+str(self.index_trans[-1]), color='b')
self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: '+str(self.index_truth[-1]), color='b')
self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: '+str(self.index_segmen[-1]), color='b')
self.scroll_trans = None
self.scroll_truth = None
self.scroll_segmen = None
self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)
self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)
# Enable scrolling image
def fig_enter_event(self, event):
if self.ax_trans.in_axes(event):
self.scroll_trans = self.fig.canvas.mpl_connect('scroll_event', self.trans_subplot_scroll)
elif self.ax_truth.in_axes(event):
self.scroll_truth = self.fig.canvas.mpl_connect('scroll_event', self.truth_subplot_scroll)
elif self.ax_segmen.in_axes(event):
self.scroll_segmen = self.fig.canvas.mpl_connect('scroll_event', self.segmen_subplot_scroll)
# Disable scrolling image
def fig_leave_event(self, event):
self.fig.canvas.mpl_disconnect(self.scroll_trans)
self.fig.canvas.mpl_disconnect(self.scroll_truth)
self.fig.canvas.mpl_disconnect(self.scroll_segmen)
# Scroll voxel image
def trans_subplot_scroll(self, event):
if event.button == 'down' and (self.index_trans[-1] > -1*self.vol_trans.shape[0]):
self.index_trans[-1] -= 1
if event.button == 'up' and (self.index_trans[-1] < self.vol_trans.shape[0]-1):
self.index_trans[-1] += 1
self.im_trans.set_data(self.vol_trans[self.index_trans[-1]])
self.txt_trans.set_text('Slice No: '+str(self.index_trans[-1]))
self.fig.canvas.draw_idle()
# Scroll ground truth image
def truth_subplot_scroll(self, event):
if event.button == 'down' and (self.index_truth[-1] > -1*self.vol_truth.shape[0]):
self.index_truth[-1] -= 1
if event.button == 'up' and (self.index_truth[-1] < self.vol_truth.shape[0]-1):
self.index_truth[-1] += 1
self.im_truth.set_data(self.vol_truth[self.index_truth[-1]])
self.txt_truth.set_text('Slice No: '+str(self.index_truth[-1]))
self.fig.canvas.draw_idle()
# Scroll segmented image
def segmen_subplot_scroll(self, event):
if event.button == 'down' and (self.index_segmen[-1] > -1*self.vol_segmen.shape[0]):
self.index_segmen[-1] -= 1
if event.button == 'up' and (self.index_segmen[-1] < self.vol_segmen.shape[0]-1):
self.index_segmen[-1] += 1
self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])
self.txt_segmen.set_text('Slice No: '+str(self.index_segmen[-1]))
self.fig.canvas.draw_idle()
|
flexible
|
{
"blob_id": "e0b28fdcbc3160bcccbb032949317a91a32eeb1b",
"index": 5394,
"step-1": "<mask token>\n\n\nclass ValidateWindowCtr(object):\n\n def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans,\n vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans,\n index_truth, index_segmen):\n self.fig = fig\n self.im_trans, self.im_truth, self.im_segmen = (im_trans, im_truth,\n im_segmen)\n self.vol_trans, self.vol_truth, self.vol_segmen = (vol_trans,\n vol_truth, vol_segmen)\n self.ax_trans, self.ax_truth, self.ax_segmen = (ax_trans, ax_truth,\n ax_segmen)\n self.index_trans, self.index_truth, self.index_segmen = (\n index_trans, index_truth, index_segmen)\n self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: ' + str(self\n .index_trans[-1]), color='b')\n self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: ' + str(self.\n index_truth[-1]), color='b')\n self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: ' + str(\n self.index_segmen[-1]), color='b')\n self.scroll_trans = None\n self.scroll_truth = None\n self.scroll_segmen = None\n self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)\n self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)\n <mask token>\n\n def fig_leave_event(self, event):\n self.fig.canvas.mpl_disconnect(self.scroll_trans)\n self.fig.canvas.mpl_disconnect(self.scroll_truth)\n self.fig.canvas.mpl_disconnect(self.scroll_segmen)\n <mask token>\n <mask token>\n\n def segmen_subplot_scroll(self, event):\n if event.button == 'down' and self.index_segmen[-1\n ] > -1 * self.vol_segmen.shape[0]:\n self.index_segmen[-1] -= 1\n if event.button == 'up' and self.index_segmen[-1\n ] < self.vol_segmen.shape[0] - 1:\n self.index_segmen[-1] += 1\n self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])\n self.txt_segmen.set_text('Slice No: ' + str(self.index_segmen[-1]))\n self.fig.canvas.draw_idle()\n",
"step-2": "<mask token>\n\n\nclass ValidateWindowCtr(object):\n\n def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans,\n vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans,\n index_truth, index_segmen):\n self.fig = fig\n self.im_trans, self.im_truth, self.im_segmen = (im_trans, im_truth,\n im_segmen)\n self.vol_trans, self.vol_truth, self.vol_segmen = (vol_trans,\n vol_truth, vol_segmen)\n self.ax_trans, self.ax_truth, self.ax_segmen = (ax_trans, ax_truth,\n ax_segmen)\n self.index_trans, self.index_truth, self.index_segmen = (\n index_trans, index_truth, index_segmen)\n self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: ' + str(self\n .index_trans[-1]), color='b')\n self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: ' + str(self.\n index_truth[-1]), color='b')\n self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: ' + str(\n self.index_segmen[-1]), color='b')\n self.scroll_trans = None\n self.scroll_truth = None\n self.scroll_segmen = None\n self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)\n self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)\n <mask token>\n\n def fig_leave_event(self, event):\n self.fig.canvas.mpl_disconnect(self.scroll_trans)\n self.fig.canvas.mpl_disconnect(self.scroll_truth)\n self.fig.canvas.mpl_disconnect(self.scroll_segmen)\n <mask token>\n\n def truth_subplot_scroll(self, event):\n if event.button == 'down' and self.index_truth[-1\n ] > -1 * self.vol_truth.shape[0]:\n self.index_truth[-1] -= 1\n if event.button == 'up' and self.index_truth[-1\n ] < self.vol_truth.shape[0] - 1:\n self.index_truth[-1] += 1\n self.im_truth.set_data(self.vol_truth[self.index_truth[-1]])\n self.txt_truth.set_text('Slice No: ' + str(self.index_truth[-1]))\n self.fig.canvas.draw_idle()\n\n def segmen_subplot_scroll(self, event):\n if event.button == 'down' and self.index_segmen[-1\n ] > -1 * self.vol_segmen.shape[0]:\n self.index_segmen[-1] -= 1\n if event.button == 'up' and self.index_segmen[-1\n ] < self.vol_segmen.shape[0] - 1:\n self.index_segmen[-1] += 1\n self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])\n self.txt_segmen.set_text('Slice No: ' + str(self.index_segmen[-1]))\n self.fig.canvas.draw_idle()\n",
"step-3": "<mask token>\n\n\nclass ValidateWindowCtr(object):\n\n def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans,\n vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans,\n index_truth, index_segmen):\n self.fig = fig\n self.im_trans, self.im_truth, self.im_segmen = (im_trans, im_truth,\n im_segmen)\n self.vol_trans, self.vol_truth, self.vol_segmen = (vol_trans,\n vol_truth, vol_segmen)\n self.ax_trans, self.ax_truth, self.ax_segmen = (ax_trans, ax_truth,\n ax_segmen)\n self.index_trans, self.index_truth, self.index_segmen = (\n index_trans, index_truth, index_segmen)\n self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: ' + str(self\n .index_trans[-1]), color='b')\n self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: ' + str(self.\n index_truth[-1]), color='b')\n self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: ' + str(\n self.index_segmen[-1]), color='b')\n self.scroll_trans = None\n self.scroll_truth = None\n self.scroll_segmen = None\n self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)\n self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)\n\n def fig_enter_event(self, event):\n if self.ax_trans.in_axes(event):\n self.scroll_trans = self.fig.canvas.mpl_connect('scroll_event',\n self.trans_subplot_scroll)\n elif self.ax_truth.in_axes(event):\n self.scroll_truth = self.fig.canvas.mpl_connect('scroll_event',\n self.truth_subplot_scroll)\n elif self.ax_segmen.in_axes(event):\n self.scroll_segmen = self.fig.canvas.mpl_connect('scroll_event',\n self.segmen_subplot_scroll)\n\n def fig_leave_event(self, event):\n self.fig.canvas.mpl_disconnect(self.scroll_trans)\n self.fig.canvas.mpl_disconnect(self.scroll_truth)\n self.fig.canvas.mpl_disconnect(self.scroll_segmen)\n <mask token>\n\n def truth_subplot_scroll(self, event):\n if event.button == 'down' and self.index_truth[-1\n ] > -1 * self.vol_truth.shape[0]:\n self.index_truth[-1] -= 1\n if event.button == 'up' and self.index_truth[-1\n ] < self.vol_truth.shape[0] - 1:\n self.index_truth[-1] += 1\n self.im_truth.set_data(self.vol_truth[self.index_truth[-1]])\n self.txt_truth.set_text('Slice No: ' + str(self.index_truth[-1]))\n self.fig.canvas.draw_idle()\n\n def segmen_subplot_scroll(self, event):\n if event.button == 'down' and self.index_segmen[-1\n ] > -1 * self.vol_segmen.shape[0]:\n self.index_segmen[-1] -= 1\n if event.button == 'up' and self.index_segmen[-1\n ] < self.vol_segmen.shape[0] - 1:\n self.index_segmen[-1] += 1\n self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])\n self.txt_segmen.set_text('Slice No: ' + str(self.index_segmen[-1]))\n self.fig.canvas.draw_idle()\n",
"step-4": "<mask token>\n\n\nclass ValidateWindowCtr(object):\n\n def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans,\n vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans,\n index_truth, index_segmen):\n self.fig = fig\n self.im_trans, self.im_truth, self.im_segmen = (im_trans, im_truth,\n im_segmen)\n self.vol_trans, self.vol_truth, self.vol_segmen = (vol_trans,\n vol_truth, vol_segmen)\n self.ax_trans, self.ax_truth, self.ax_segmen = (ax_trans, ax_truth,\n ax_segmen)\n self.index_trans, self.index_truth, self.index_segmen = (\n index_trans, index_truth, index_segmen)\n self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: ' + str(self\n .index_trans[-1]), color='b')\n self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: ' + str(self.\n index_truth[-1]), color='b')\n self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: ' + str(\n self.index_segmen[-1]), color='b')\n self.scroll_trans = None\n self.scroll_truth = None\n self.scroll_segmen = None\n self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)\n self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)\n\n def fig_enter_event(self, event):\n if self.ax_trans.in_axes(event):\n self.scroll_trans = self.fig.canvas.mpl_connect('scroll_event',\n self.trans_subplot_scroll)\n elif self.ax_truth.in_axes(event):\n self.scroll_truth = self.fig.canvas.mpl_connect('scroll_event',\n self.truth_subplot_scroll)\n elif self.ax_segmen.in_axes(event):\n self.scroll_segmen = self.fig.canvas.mpl_connect('scroll_event',\n self.segmen_subplot_scroll)\n\n def fig_leave_event(self, event):\n self.fig.canvas.mpl_disconnect(self.scroll_trans)\n self.fig.canvas.mpl_disconnect(self.scroll_truth)\n self.fig.canvas.mpl_disconnect(self.scroll_segmen)\n\n def trans_subplot_scroll(self, event):\n if event.button == 'down' and self.index_trans[-1\n ] > -1 * self.vol_trans.shape[0]:\n self.index_trans[-1] -= 1\n if event.button == 'up' and self.index_trans[-1\n ] < self.vol_trans.shape[0] - 1:\n self.index_trans[-1] += 1\n self.im_trans.set_data(self.vol_trans[self.index_trans[-1]])\n self.txt_trans.set_text('Slice No: ' + str(self.index_trans[-1]))\n self.fig.canvas.draw_idle()\n\n def truth_subplot_scroll(self, event):\n if event.button == 'down' and self.index_truth[-1\n ] > -1 * self.vol_truth.shape[0]:\n self.index_truth[-1] -= 1\n if event.button == 'up' and self.index_truth[-1\n ] < self.vol_truth.shape[0] - 1:\n self.index_truth[-1] += 1\n self.im_truth.set_data(self.vol_truth[self.index_truth[-1]])\n self.txt_truth.set_text('Slice No: ' + str(self.index_truth[-1]))\n self.fig.canvas.draw_idle()\n\n def segmen_subplot_scroll(self, event):\n if event.button == 'down' and self.index_segmen[-1\n ] > -1 * self.vol_segmen.shape[0]:\n self.index_segmen[-1] -= 1\n if event.button == 'up' and self.index_segmen[-1\n ] < self.vol_segmen.shape[0] - 1:\n self.index_segmen[-1] += 1\n self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])\n self.txt_segmen.set_text('Slice No: ' + str(self.index_segmen[-1]))\n self.fig.canvas.draw_idle()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 4 12:47:30 2019\r\nTitle: MP4-Medical Image Processing\r\n@author: MP4 Team\r\n\r\n\"\"\"\r\n\r\n# Validate window controller\r\nclass ValidateWindowCtr(object):\r\n # Initialization\r\n def __init__(self, fig, im_trans, im_truth, im_segmen, vol_trans, vol_truth, vol_segmen, ax_trans, ax_truth, ax_segmen, index_trans, index_truth, index_segmen):\r\n self.fig = fig\r\n self.im_trans, self.im_truth, self.im_segmen = im_trans, im_truth, im_segmen\r\n self.vol_trans, self.vol_truth, self.vol_segmen = vol_trans, vol_truth, vol_segmen\r\n self.ax_trans, self.ax_truth, self.ax_segmen = ax_trans, ax_truth, ax_segmen\r\n self.index_trans, self.index_truth, self.index_segmen = index_trans, index_truth, index_segmen\r\n \r\n self.txt_trans = self.ax_trans.text(0, 600, 'Slice No: '+str(self.index_trans[-1]), color='b')\r\n self.txt_truth = self.ax_truth.text(0, 10, 'Slice No: '+str(self.index_truth[-1]), color='b')\r\n self.txt_segmen = self.ax_segmen.text(0, 600, 'Slice No: '+str(self.index_segmen[-1]), color='b')\r\n \r\n self.scroll_trans = None\r\n self.scroll_truth = None\r\n self.scroll_segmen = None\r\n self.fig.canvas.mpl_connect('axes_enter_event', self.fig_enter_event)\r\n self.fig.canvas.mpl_connect('axes_leave_event', self.fig_leave_event)\r\n \r\n # Enable scrolling image\r\n def fig_enter_event(self, event):\r\n if self.ax_trans.in_axes(event):\r\n self.scroll_trans = self.fig.canvas.mpl_connect('scroll_event', self.trans_subplot_scroll)\r\n \r\n elif self.ax_truth.in_axes(event):\r\n self.scroll_truth = self.fig.canvas.mpl_connect('scroll_event', self.truth_subplot_scroll)\r\n \r\n elif self.ax_segmen.in_axes(event):\r\n self.scroll_segmen = self.fig.canvas.mpl_connect('scroll_event', self.segmen_subplot_scroll)\r\n \r\n # Disable scrolling image\r\n def fig_leave_event(self, event):\r\n self.fig.canvas.mpl_disconnect(self.scroll_trans)\r\n self.fig.canvas.mpl_disconnect(self.scroll_truth)\r\n self.fig.canvas.mpl_disconnect(self.scroll_segmen)\r\n \r\n # Scroll voxel image\r\n def trans_subplot_scroll(self, event): \r\n if event.button == 'down' and (self.index_trans[-1] > -1*self.vol_trans.shape[0]):\r\n self.index_trans[-1] -= 1\r\n \r\n if event.button == 'up' and (self.index_trans[-1] < self.vol_trans.shape[0]-1):\r\n self.index_trans[-1] += 1\r\n \r\n self.im_trans.set_data(self.vol_trans[self.index_trans[-1]])\r\n self.txt_trans.set_text('Slice No: '+str(self.index_trans[-1]))\r\n self.fig.canvas.draw_idle()\r\n \r\n # Scroll ground truth image\r\n def truth_subplot_scroll(self, event): \r\n if event.button == 'down' and (self.index_truth[-1] > -1*self.vol_truth.shape[0]):\r\n self.index_truth[-1] -= 1\r\n \r\n if event.button == 'up' and (self.index_truth[-1] < self.vol_truth.shape[0]-1):\r\n self.index_truth[-1] += 1\r\n \r\n self.im_truth.set_data(self.vol_truth[self.index_truth[-1]])\r\n self.txt_truth.set_text('Slice No: '+str(self.index_truth[-1]))\r\n self.fig.canvas.draw_idle()\r\n \r\n # Scroll segmented image\r\n def segmen_subplot_scroll(self, event): \r\n if event.button == 'down' and (self.index_segmen[-1] > -1*self.vol_segmen.shape[0]):\r\n self.index_segmen[-1] -= 1\r\n \r\n if event.button == 'up' and (self.index_segmen[-1] < self.vol_segmen.shape[0]-1):\r\n self.index_segmen[-1] += 1\r\n \r\n self.im_segmen.set_data(self.vol_segmen[self.index_segmen[-1]])\r\n self.txt_segmen.set_text('Slice No: '+str(self.index_segmen[-1])) \r\n self.fig.canvas.draw_idle()\r\n ",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('credentials_as.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
<|reserved_special_token_0|>
print(df)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('credentials_as.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
db_schema = None
db = Database(credentials=credentials)
<|reserved_special_token_0|>
fn = MultiplyByFactor(input_items=['orgoccupancycount', 'occupancycount'],
factor=2, output_items=['adjusted_orgoccupancycount',
'adjusted_occupancycount'])
df = fn.execute_local_test(db=db, db_schema=db_schema, generate_days=1,
to_csv=True)
print(df)
<|reserved_special_token_1|>
import datetime as dt
import json
import pandas as pd
import numpy as np
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions.base import BaseTransformer
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions import ui
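# Load the Analytics service credentials from a local JSON file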
with open('credentials_as.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
db_schema = None
db = Database(credentials=credentials)
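# Import the custom function under test from the local 'custom' package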
from custom.multiplybyfactor import MultiplyByFactor
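# Configure the transformer with two input metrics, a factor of 2, and matching output item names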
fn = MultiplyByFactor(input_items=['orgoccupancycount', 'occupancycount'],
factor=2, output_items=['adjusted_orgoccupancycount',
'adjusted_occupancycount'])
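# Run a local test against one day of generated sample data and write the result to CSV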
df = fn.execute_local_test(db=db, db_schema=db_schema, generate_days=1,
to_csv=True)
print(df)
|
flexible
|
{
"blob_id": "f15a0956c4aa27da861f9bccbeff7a6b6a909b73",
"index": 1113,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('credentials_as.json', encoding='utf-8') as F:\n credentials = json.loads(F.read())\n<mask token>\nprint(df)\n",
"step-3": "<mask token>\nwith open('credentials_as.json', encoding='utf-8') as F:\n credentials = json.loads(F.read())\ndb_schema = None\ndb = Database(credentials=credentials)\n<mask token>\nfn = MultiplyByFactor(input_items=['orgoccupancycount', 'occupancycount'],\n factor=2, output_items=['adjusted_orgoccupancycount',\n 'adjusted_occupancycount'])\ndf = fn.execute_local_test(db=db, db_schema=db_schema, generate_days=1,\n to_csv=True)\nprint(df)\n",
"step-4": "import datetime as dt\nimport json\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func\nfrom iotfunctions.base import BaseTransformer\nfrom iotfunctions.metadata import EntityType\nfrom iotfunctions.db import Database\nfrom iotfunctions import ui\nwith open('credentials_as.json', encoding='utf-8') as F:\n credentials = json.loads(F.read())\ndb_schema = None\ndb = Database(credentials=credentials)\nfrom custom.multiplybyfactor import MultiplyByFactor\nfn = MultiplyByFactor(input_items=['orgoccupancycount', 'occupancycount'],\n factor=2, output_items=['adjusted_orgoccupancycount',\n 'adjusted_occupancycount'])\ndf = fn.execute_local_test(db=db, db_schema=db_schema, generate_days=1,\n to_csv=True)\nprint(df)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
{'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.034482758620689662, 0.035087719298245612), 'tuned_ensemble': ({'svm__C': 100000.0, 'rf__n_estimators': 101, 'cart__min_samples_leaf': 7, 'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33, 'cart__max_features': 0.35714285714285721, 'svm__kernel': 'sigmoid', 'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11, 'cart__random_state': 1542, 'nb__priors': None, 'knn__weights': 'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features': 0.43979591836734699, 'cart__min_samples_split': 18}, 0.28915662650602408, 0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882, 0.3529411764705882), 'best_param_ensemble': ({}, 0.28915662650602408, 0.2988505747126437), 'rf': ({'min_samples_split': 17, 'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542, 'max_leaf_nodes': 46, 'max_features': 0.94489795918367347}, 0.27083333333333337, 0.38095238095238099), 'cart': ({'max_depth': 50, 'random_state': 1542, 'max_features': 0.19183673469387758, 'min_samples_split': 13, 'min_samples_leaf': 5}, 0.31192660550458717, 0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'}, 0.23529411764705882, 0.23749999999999996)}}
|
normal
|
{
"blob_id": "fa02fb701b59728671a7e87147adaeb33422dcdb",
"index": 1600,
"step-1": "<mask token>\n",
"step-2": "{'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.03448275862068966, \n 0.03508771929824561), 'tuned_ensemble': ({'svm__C': 100000.0,\n 'rf__n_estimators': 101, 'cart__min_samples_leaf': 7,\n 'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33,\n 'cart__max_features': 0.3571428571428572, 'svm__kernel': 'sigmoid',\n 'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11,\n 'cart__random_state': 1542, 'nb__priors': None, 'knn__weights':\n 'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features': \n 0.439795918367347, 'cart__min_samples_split': 18}, 0.2891566265060241, \n 0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882, \n 0.3529411764705882), 'best_param_ensemble': ({}, 0.2891566265060241, \n 0.2988505747126437), 'rf': ({'min_samples_split': 17,\n 'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542,\n 'max_leaf_nodes': 46, 'max_features': 0.9448979591836735}, \n 0.27083333333333337, 0.380952380952381), 'cart': ({'max_depth': 50,\n 'random_state': 1542, 'max_features': 0.19183673469387758,\n 'min_samples_split': 13, 'min_samples_leaf': 5}, 0.3119266055045872, \n 0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'}, \n 0.23529411764705882, 0.23749999999999996)}}\n",
"step-3": "{'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.034482758620689662, 0.035087719298245612), 'tuned_ensemble': ({'svm__C': 100000.0, 'rf__n_estimators': 101, 'cart__min_samples_leaf': 7, 'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33, 'cart__max_features': 0.35714285714285721, 'svm__kernel': 'sigmoid', 'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11, 'cart__random_state': 1542, 'nb__priors': None, 'knn__weights': 'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features': 0.43979591836734699, 'cart__min_samples_split': 18}, 0.28915662650602408, 0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882, 0.3529411764705882), 'best_param_ensemble': ({}, 0.28915662650602408, 0.2988505747126437), 'rf': ({'min_samples_split': 17, 'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542, 'max_leaf_nodes': 46, 'max_features': 0.94489795918367347}, 0.27083333333333337, 0.38095238095238099), 'cart': ({'max_depth': 50, 'random_state': 1542, 'max_features': 0.19183673469387758, 'min_samples_split': 13, 'min_samples_leaf': 5}, 0.31192660550458717, 0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'}, 0.23529411764705882, 0.23749999999999996)}}",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('-' * 100)
print('BIENVENIDOS A TIENDA ELEGANCIA')
print('-' * 100)
<|reserved_special_token_0|>
print(prendaseleccionada1)
<|reserved_special_token_0|>
print('La prenda: ', tipoPrenda1, 'participa de del plan SuperPuntos? s/n')
<|reserved_special_token_0|>
if valor1 == 's':
v1 = 's'
valor1 = precio1
superPuntos = superPuntos + precio1
elif valor1 == 'n':
v1 = 'n'
valor1 = 0
<|reserved_special_token_0|>
print(prendaseleccionada2)
<|reserved_special_token_0|>
print('La prenda: ', tipoPrenda2, 'participa de del plan SuperPuntos? s/n')
<|reserved_special_token_0|>
if valor2 == 's':
v2 = 's'
valor2 = precio2
superPuntos = superPuntos + precio2
elif valor2 == 'n':
v2 = 'n'
valor2 = 0
<|reserved_special_token_0|>
print(prendaseleccionada3)
<|reserved_special_token_0|>
print('La prenda: ', tipoPrenda3, 'participa de del plan SuperPuntos? s/n')
<|reserved_special_token_0|>
if valor3 == 's':
v3 = 's'
valor3 = precio3
superPuntos = superPuntos + precio3
elif valor3 == 'n':
v3 = 'n'
valor3 = 0
if tipoPrenda1 == tipoPrenda2 == tipoPrenda3:
if precio1 < precio2 and precio1 < precio3:
precio1 = 0
elif precio2 < precio3:
precio2 = 0
else:
precio3 = 0
if tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:
if precio1 > precio2:
precio1 = precio1 / 2
else:
precio2 = precio2 / 2
if tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:
if precio1 > precio3:
precio1 = precio1 / 2
else:
precio3 = precio3 / 2
if tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:
if precio2 > precio3:
precio2 = precio2 / 2
else:
precio3 = precio3 / 2
<|reserved_special_token_0|>
if formaDePago == 1:
formaDePago = 'Contado (%10 de Descuento)'
montoAPagar = precioTotal / 100 * 90
elif formaDePago == 2:
cuotas = int(input('ingrese en cuantas cuotas desea pagar:'))
if cuotas <= 3:
formaDePago = 'Tarjeta (%2 de Recarga) cantidad de cuotas:', cuotas
montoAPagar = precioTotal / 100 * 102
elif cuotas > 3:
formaDePago = 'Tarjeta (%5 de Recarga) cantidad de cuotas:', cuotas
montoAPagar = precioTotal / 100 * 105
elif cuotas <= 0:
formaDePago = 'Contado (%10 de Descuento)'
montoAPagar = precioTotal / 100 * 90
if valor1 > 0 and valor2 > 0 and valor3 > 0:
superPuntos = superPuntos * 2
print('----------------------------------------------------')
print('Tienda Elegancia')
print('Tipo, Precio, SuperPuntos')
print(prendaseleccionada1, precioinicial1, v1)
print(prendaseleccionada2, precioinicial2, v2)
print(prendaseleccionada3, precioinicial3, v3)
print('Total sin promo: ', precioSinPromo)
print('Ahorro: ', ahorro)
print('Total Con Promo: ', precioTotal)
print('Forma de Pago: ', formaDePago)
print('Monto a Pagar: ', montoAPagar)
print('Usted obtiene: ', superPuntos, 'SuperPuntos')
print('----------------------------------------------------')
<|reserved_special_token_1|>
print('-' * 100)
print('BIENVENIDOS A TIENDA ELEGANCIA')
print('-' * 100)
prendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos',
'Abrigos', 'Calzado')
precioSinPromo = 0
superPuntos = 0
tipoPrenda1 = int(input(
'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '
))
prendaseleccionada1 = prendas[tipoPrenda1]
print(prendaseleccionada1)
precio1 = float(input('Ingrese precio: $'))
precioinicial1 = precio1
precioSinPromo = precioSinPromo + precio1
print('La prenda: ', tipoPrenda1, 'participa de del plan SuperPuntos? s/n')
valor1 = input()
v1 = None
if valor1 == 's':
v1 = 's'
valor1 = precio1
superPuntos = superPuntos + precio1
elif valor1 == 'n':
v1 = 'n'
valor1 = 0
tipoPrenda2 = int(input(
'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '
))
prendaseleccionada2 = prendas[tipoPrenda2]
print(prendaseleccionada2)
precio2 = float(input('Ingrese precio: $'))
precioinicial2 = precio2
precioSinPromo = precioSinPromo + precio2
print('La prenda: ', tipoPrenda2, 'participa de del plan SuperPuntos? s/n')
valor2 = input()
v2 = None
if valor2 == 's':
v2 = 's'
valor2 = precio2
superPuntos = superPuntos + precio2
elif valor2 == 'n':
v2 = 'n'
valor2 = 0
tipoPrenda3 = int(input(
'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '
))
prendaseleccionada3 = prendas[tipoPrenda3]
print(prendaseleccionada3)
precio3 = float(input('Ingrese precio: $'))
precioinicial3 = precio3
precioSinPromo = precioSinPromo + precio3
print('La prenda: ', tipoPrenda3, 'participa de del plan SuperPuntos? s/n')
valor3 = input()
v3 = None
if valor3 == 's':
v3 = 's'
valor3 = precio3
superPuntos = superPuntos + precio3
elif valor3 == 'n':
v3 = 'n'
valor3 = 0
if tipoPrenda1 == tipoPrenda2 == tipoPrenda3:
if precio1 < precio2 and precio1 < precio3:
precio1 = 0
elif precio2 < precio3:
precio2 = 0
else:
precio3 = 0
if tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:
if precio1 > precio2:
precio1 = precio1 / 2
else:
precio2 = precio2 / 2
if tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:
if precio1 > precio3:
precio1 = precio1 / 2
else:
precio3 = precio3 / 2
if tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:
if precio2 > precio3:
precio2 = precio2 / 2
else:
precio3 = precio3 / 2
precioTotal = precio1 + precio2 + precio3
ahorro = precioSinPromo - precioTotal
formaDePago = int(input('Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta'))
montoAPagar = 0
if formaDePago == 1:
formaDePago = 'Contado (%10 de Descuento)'
montoAPagar = precioTotal / 100 * 90
elif formaDePago == 2:
cuotas = int(input('ingrese en cuantas cuotas desea pagar:'))
if cuotas <= 3:
formaDePago = 'Tarjeta (%2 de Recarga) cantidad de cuotas:', cuotas
montoAPagar = precioTotal / 100 * 102
elif cuotas > 3:
formaDePago = 'Tarjeta (%5 de Recarga) cantidad de cuotas:', cuotas
montoAPagar = precioTotal / 100 * 105
elif cuotas <= 0:
formaDePago = 'Contado (%10 de Descuento)'
montoAPagar = precioTotal / 100 * 90
if valor1 > 0 and valor2 > 0 and valor3 > 0:
superPuntos = superPuntos * 2
print('----------------------------------------------------')
print('Tienda Elegancia')
print('Tipo, Precio, SuperPuntos')
print(prendaseleccionada1, precioinicial1, v1)
print(prendaseleccionada2, precioinicial2, v2)
print(prendaseleccionada3, precioinicial3, v3)
print('Total sin promo: ', precioSinPromo)
print('Ahorro: ', ahorro)
print('Total Con Promo: ', precioTotal)
print('Forma de Pago: ', formaDePago)
print('Monto a Pagar: ', montoAPagar)
print('Usted obtiene: ', superPuntos, 'SuperPuntos')
print('----------------------------------------------------')
<|reserved_special_token_1|>
print('-'*100)
print('BIENVENIDOS A TIENDA ELEGANCIA')
print('-'*100)
prendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos', 'Abrigos', 'Calzado')
precioSinPromo = 0
superPuntos = 0
#ARTICULO 1
tipoPrenda1 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada1 = prendas[tipoPrenda1]
print(prendaseleccionada1)
precio1 = float(input('Ingrese precio: $'))
precioinicial1 = precio1
precioSinPromo = precioSinPromo + precio1
print("La prenda: ", tipoPrenda1,"participa de del plan SuperPuntos? s/n")
valor1 = input()
v1 = None
if(valor1 == "s"):
v1 = 's'
valor1 = precio1
superPuntos = superPuntos + precio1
else:
if(valor1 == "n"):
v1 = "n"
valor1 = 0
# ARTICULO 2
tipoPrenda2 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada2 = prendas[tipoPrenda2]
print(prendaseleccionada2)
precio2 = float(input('Ingrese precio: $'))
precioinicial2 = precio2
precioSinPromo = precioSinPromo + precio2
print("La prenda: ", tipoPrenda2, "participa de del plan SuperPuntos? s/n")
valor2 = input()
v2 = None
if (valor2 == "s"):
v2 = "s"
valor2 = precio2
superPuntos = superPuntos + precio2
else:
if (valor2 == "n"):
v2 = "n"
valor2 = 0
# ARTICULO 3
tipoPrenda3 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada3 = prendas[tipoPrenda3]
print(prendaseleccionada3)
precio3 = float(input('Ingrese precio: $'))
precioinicial3 = precio3
precioSinPromo = precioSinPromo + precio3
print("La prenda: ", tipoPrenda3, "participa de del plan SuperPuntos? s/n")
valor3 = input()
v3 = None
if (valor3 == "s"):
v3 = "s"
valor3 = precio3
superPuntos = superPuntos + precio3
else:
if (valor3 == "n"):
v3 = "n"
valor3 = 0
#PROMO 3X2
if tipoPrenda1 == tipoPrenda2 == tipoPrenda3:
if precio1 < precio2 and precio1 < precio3:
precio1 = 0
else:
if precio2 < precio3:
precio2 = 0
else:
precio3 = 0
#PROMO 50%
if tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:
if precio1 > precio2:
precio1 = precio1 / 2
else:
precio2 = precio2 / 2
if tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:
if precio1 > precio3:
precio1 = precio1 / 2
else:
precio3 = precio3 / 2
if tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:
if precio2 > precio3:
precio2 = precio2 / 2
else:
precio3 = precio3 / 2
precioTotal = precio1 + precio2 + precio3
ahorro = precioSinPromo - precioTotal
#FORMA DE PAGO
formaDePago = int(input("Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta"))
montoAPagar = 0
if formaDePago == 1:
formaDePago = "Contado (%10 de Descuento)"
montoAPagar=precioTotal/100*90
else:
if(formaDePago == 2):
cuotas=int(input("ingrese en cuantas cuotas desea pagar:"))
if(cuotas <= 3):
formaDePago="Tarjeta (%2 de Recarga) cantidad de cuotas:", cuotas
montoAPagar=precioTotal/100*102
else:
if(cuotas > 3):
formaDePago="Tarjeta (%5 de Recarga) cantidad de cuotas:", cuotas
montoAPagar=precioTotal/100*105
else:
if(cuotas <= 0):
formaDePago="Contado (%10 de Descuento)"
montoAPagar=precioTotal/100*90
if valor1 > 0 and valor2 > 0 and valor3 > 0:
superPuntos = superPuntos * 2
print("----------------------------------------------------")
print("Tienda Elegancia")
print("Tipo, Precio, SuperPuntos")
print(prendaseleccionada1 , precioinicial1, v1)
print(prendaseleccionada2 , precioinicial2 , v2)
print(prendaseleccionada3 , precioinicial3 , v3)
print("Total sin promo: ", precioSinPromo)
print("Ahorro: ", ahorro)
print("Total Con Promo: ", precioTotal)
print("Forma de Pago: ", formaDePago)
print("Monto a Pagar: ", montoAPagar)
print("Usted obtiene: ", superPuntos, "SuperPuntos")
print("----------------------------------------------------")
|
flexible
|
{
"blob_id": "333d237dd4a203fcfde3668901d725f16fbc402e",
"index": 1684,
"step-1": "<mask token>\n",
"step-2": "print('-' * 100)\nprint('BIENVENIDOS A TIENDA ELEGANCIA')\nprint('-' * 100)\n<mask token>\nprint(prendaseleccionada1)\n<mask token>\nprint('La prenda: ', tipoPrenda1, 'participa de del plan SuperPuntos? s/n')\n<mask token>\nif valor1 == 's':\n v1 = 's'\n valor1 = precio1\n superPuntos = superPuntos + precio1\nelif valor1 == 'n':\n v1 = 'n'\n valor1 = 0\n<mask token>\nprint(prendaseleccionada2)\n<mask token>\nprint('La prenda: ', tipoPrenda2, 'participa de del plan SuperPuntos? s/n')\n<mask token>\nif valor2 == 's':\n v2 = 's'\n valor2 = precio2\n superPuntos = superPuntos + precio2\nelif valor2 == 'n':\n v2 = 'n'\n valor2 = 0\n<mask token>\nprint(prendaseleccionada3)\n<mask token>\nprint('La prenda: ', tipoPrenda3, 'participa de del plan SuperPuntos? s/n')\n<mask token>\nif valor3 == 's':\n v3 = 's'\n valor3 = precio3\n superPuntos = superPuntos + precio3\nelif valor3 == 'n':\n v3 = 'n'\n valor3 = 0\nif tipoPrenda1 == tipoPrenda2 == tipoPrenda3:\n if precio1 < precio2 and precio1 < precio3:\n precio1 = 0\n elif precio2 < precio3:\n precio2 = 0\n else:\n precio3 = 0\nif tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:\n if precio1 > precio2:\n precio1 = precio1 / 2\n else:\n precio2 = precio2 / 2\nif tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:\n if precio1 > precio3:\n precio1 = precio1 / 2\n else:\n precio3 = precio3 / 2\nif tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:\n if precio2 > precio3:\n precio2 = precio2 / 2\n else:\n precio3 = precio3 / 2\n<mask token>\nif formaDePago == 1:\n formaDePago = 'Contado (%10 de Descuento)'\n montoAPagar = precioTotal / 100 * 90\nelif formaDePago == 2:\n cuotas = int(input('ingrese en cuantas cuotas desea pagar:'))\n if cuotas <= 3:\n formaDePago = 'Tarjeta (%2 de Recarga) cantidad de cuotas:', cuotas\n montoAPagar = precioTotal / 100 * 102\n elif cuotas > 3:\n formaDePago = 'Tarjeta (%5 de Recarga) cantidad de cuotas:', cuotas\n montoAPagar = precioTotal / 100 * 105\n elif cuotas <= 0:\n formaDePago = 'Contado (%10 de Descuento)'\n montoAPagar = precioTotal / 100 * 90\nif valor1 > 0 and valor2 > 0 and valor3 > 0:\n superPuntos = superPuntos * 2\nprint('----------------------------------------------------')\nprint('Tienda Elegancia')\nprint('Tipo, Precio, SuperPuntos')\nprint(prendaseleccionada1, precioinicial1, v1)\nprint(prendaseleccionada2, precioinicial2, v2)\nprint(prendaseleccionada3, precioinicial3, v3)\nprint('Total sin promo: ', precioSinPromo)\nprint('Ahorro: ', ahorro)\nprint('Total Con Promo: ', precioTotal)\nprint('Forma de Pago: ', formaDePago)\nprint('Monto a Pagar: ', montoAPagar)\nprint('Usted obtiene: ', superPuntos, 'SuperPuntos')\nprint('----------------------------------------------------')\n",
"step-3": "print('-' * 100)\nprint('BIENVENIDOS A TIENDA ELEGANCIA')\nprint('-' * 100)\nprendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos',\n 'Abrigos', 'Calzado')\nprecioSinPromo = 0\nsuperPuntos = 0\ntipoPrenda1 = int(input(\n 'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '\n ))\nprendaseleccionada1 = prendas[tipoPrenda1]\nprint(prendaseleccionada1)\nprecio1 = float(input('Ingrese precio: $'))\nprecioinicial1 = precio1\nprecioSinPromo = precioSinPromo + precio1\nprint('La prenda: ', tipoPrenda1, 'participa de del plan SuperPuntos? s/n')\nvalor1 = input()\nv1 = None\nif valor1 == 's':\n v1 = 's'\n valor1 = precio1\n superPuntos = superPuntos + precio1\nelif valor1 == 'n':\n v1 = 'n'\n valor1 = 0\ntipoPrenda2 = int(input(\n 'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '\n ))\nprendaseleccionada2 = prendas[tipoPrenda2]\nprint(prendaseleccionada2)\nprecio2 = float(input('Ingrese precio: $'))\nprecioinicial2 = precio2\nprecioSinPromo = precioSinPromo + precio2\nprint('La prenda: ', tipoPrenda2, 'participa de del plan SuperPuntos? s/n')\nvalor2 = input()\nv2 = None\nif valor2 == 's':\n v2 = 's'\n valor2 = precio2\n superPuntos = superPuntos + precio2\nelif valor2 == 'n':\n v2 = 'n'\n valor2 = 0\ntipoPrenda3 = int(input(\n 'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '\n ))\nprendaseleccionada3 = prendas[tipoPrenda3]\nprint(prendaseleccionada3)\nprecio3 = float(input('Ingrese precio: $'))\nprecioinicial3 = precio3\nprecioSinPromo = precioSinPromo + precio3\nprint('La prenda: ', tipoPrenda3, 'participa de del plan SuperPuntos? 
s/n')\nvalor3 = input()\nv3 = None\nif valor3 == 's':\n v3 = 's'\n valor3 = precio3\n superPuntos = superPuntos + precio3\nelif valor3 == 'n':\n v3 = 'n'\n valor3 = 0\nif tipoPrenda1 == tipoPrenda2 == tipoPrenda3:\n if precio1 < precio2 and precio1 < precio3:\n precio1 = 0\n elif precio2 < precio3:\n precio2 = 0\n else:\n precio3 = 0\nif tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:\n if precio1 > precio2:\n precio1 = precio1 / 2\n else:\n precio2 = precio2 / 2\nif tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:\n if precio1 > precio3:\n precio1 = precio1 / 2\n else:\n precio3 = precio3 / 2\nif tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:\n if precio2 > precio3:\n precio2 = precio2 / 2\n else:\n precio3 = precio3 / 2\nprecioTotal = precio1 + precio2 + precio3\nahorro = precioSinPromo - precioTotal\nformaDePago = int(input('Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta'))\nmontoAPagar = 0\nif formaDePago == 1:\n formaDePago = 'Contado (%10 de Descuento)'\n montoAPagar = precioTotal / 100 * 90\nelif formaDePago == 2:\n cuotas = int(input('ingrese en cuantas cuotas desea pagar:'))\n if cuotas <= 3:\n formaDePago = 'Tarjeta (%2 de Recarga) cantidad de cuotas:', cuotas\n montoAPagar = precioTotal / 100 * 102\n elif cuotas > 3:\n formaDePago = 'Tarjeta (%5 de Recarga) cantidad de cuotas:', cuotas\n montoAPagar = precioTotal / 100 * 105\n elif cuotas <= 0:\n formaDePago = 'Contado (%10 de Descuento)'\n montoAPagar = precioTotal / 100 * 90\nif valor1 > 0 and valor2 > 0 and valor3 > 0:\n superPuntos = superPuntos * 2\nprint('----------------------------------------------------')\nprint('Tienda Elegancia')\nprint('Tipo, Precio, SuperPuntos')\nprint(prendaseleccionada1, precioinicial1, v1)\nprint(prendaseleccionada2, precioinicial2, v2)\nprint(prendaseleccionada3, precioinicial3, v3)\nprint('Total sin promo: ', precioSinPromo)\nprint('Ahorro: ', ahorro)\nprint('Total Con Promo: ', precioTotal)\nprint('Forma de Pago: ', formaDePago)\nprint('Monto a Pagar: ', montoAPagar)\nprint('Usted obtiene: ', superPuntos, 'SuperPuntos')\nprint('----------------------------------------------------')\n",
"step-4": "print('-'*100)\nprint('BIENVENIDOS A TIENDA ELEGANCIA')\nprint('-'*100)\n\nprendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos', 'Abrigos', 'Calzado')\n\nprecioSinPromo = 0\nsuperPuntos = 0\n\n#ARTICULO 1\ntipoPrenda1 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))\nprendaseleccionada1 = prendas[tipoPrenda1]\nprint(prendaseleccionada1)\nprecio1 = float(input('Ingrese precio: $'))\nprecioinicial1 = precio1\nprecioSinPromo = precioSinPromo + precio1\n\nprint(\"La prenda: \", tipoPrenda1,\"participa de del plan SuperPuntos? s/n\")\nvalor1 = input()\nv1 = None\nif(valor1 == \"s\"):\n v1 = 's'\n valor1 = precio1\n superPuntos = superPuntos + precio1\nelse:\n if(valor1 == \"n\"):\n v1 = \"n\"\n valor1 = 0\n\n# ARTICULO 2\ntipoPrenda2 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))\nprendaseleccionada2 = prendas[tipoPrenda2]\nprint(prendaseleccionada2)\nprecio2 = float(input('Ingrese precio: $'))\nprecioinicial2 = precio2\nprecioSinPromo = precioSinPromo + precio2\n\nprint(\"La prenda: \", tipoPrenda2, \"participa de del plan SuperPuntos? s/n\")\nvalor2 = input()\nv2 = None\nif (valor2 == \"s\"):\n v2 = \"s\"\n valor2 = precio2\n superPuntos = superPuntos + precio2\nelse:\n if (valor2 == \"n\"):\n v2 = \"n\"\n valor2 = 0\n\n# ARTICULO 3\ntipoPrenda3 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))\nprendaseleccionada3 = prendas[tipoPrenda3]\nprint(prendaseleccionada3)\nprecio3 = float(input('Ingrese precio: $'))\nprecioinicial3 = precio3\nprecioSinPromo = precioSinPromo + precio3\n\nprint(\"La prenda: \", tipoPrenda3, \"participa de del plan SuperPuntos? 
s/n\")\nvalor3 = input()\nv3 = None\nif (valor3 == \"s\"):\n v3 = \"s\"\n valor3 = precio3\n superPuntos = superPuntos + precio3\nelse:\n if (valor3 == \"n\"):\n v3 = \"n\"\n valor3 = 0\n\n#PROMO 3X2\nif tipoPrenda1 == tipoPrenda2 == tipoPrenda3:\n if precio1 < precio2 and precio1 < precio3:\n precio1 = 0\n else:\n if precio2 < precio3:\n precio2 = 0\n else:\n precio3 = 0\n\n#PROMO 50%\nif tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:\n if precio1 > precio2:\n precio1 = precio1 / 2\n else:\n precio2 = precio2 / 2\n\nif tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:\n if precio1 > precio3:\n precio1 = precio1 / 2\n else:\n precio3 = precio3 / 2\n\nif tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:\n if precio2 > precio3:\n precio2 = precio2 / 2\n else:\n precio3 = precio3 / 2\n\nprecioTotal = precio1 + precio2 + precio3\nahorro = precioSinPromo - precioTotal\n\n#FORMA DE PAGO\nformaDePago = int(input(\"Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta\"))\nmontoAPagar = 0\n\nif formaDePago == 1:\n formaDePago = \"Contado (%10 de Descuento)\"\n montoAPagar=precioTotal/100*90\nelse:\n if(formaDePago == 2):\n cuotas=int(input(\"ingrese en cuantas cuotas desea pagar:\"))\n if(cuotas <= 3):\n formaDePago=\"Tarjeta (%2 de Recarga) cantidad de cuotas:\", cuotas\n montoAPagar=precioTotal/100*102\n else:\n if(cuotas > 3):\n formaDePago=\"Tarjeta (%5 de Recarga) cantidad de cuotas:\", cuotas\n montoAPagar=precioTotal/100*105\n else:\n if(cuotas <= 0):\n formaDePago=\"Contado (%10 de Descuento)\"\n montoAPagar=precioTotal/100*90\n\nif valor1 > 0 and valor2 > 0 and valor3 > 0:\n superPuntos = superPuntos * 2\n\nprint(\"----------------------------------------------------\")\nprint(\"Tienda Elegancia\")\nprint(\"Tipo, Precio, SuperPuntos\")\nprint(prendaseleccionada1 , precioinicial1, v1)\nprint(prendaseleccionada2 , precioinicial2 , v2)\nprint(prendaseleccionada3 , precioinicial3 , v3)\nprint(\"Total sin promo: \", precioSinPromo)\nprint(\"Ahorro: \", ahorro)\nprint(\"Total Con Promo: \", precioTotal)\nprint(\"Forma de Pago: \", formaDePago)\nprint(\"Monto a Pagar: \", montoAPagar)\nprint(\"Usted obtiene: \", superPuntos, \"SuperPuntos\")\nprint(\"----------------------------------------------------\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from yapsy.IPlugin import IPlugin
import wolframalpha
import yaml
keys_file = open("friday/plugins/KEYS")
keys = yaml.load(keys_file)
keys_file.close()
class Wolfram(IPlugin):
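    # Applicable only when the request's resolved action is 'wisdom.unknown'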
def can_perform(self, friday, request):
return 'result' in request and 'resolvedQuery' in request['result']\
and 'action' in request['result'] and request['result']['action'] == 'wisdom.unknown'
# result = request['result'] # Assumes we're using gTTS
# # Get the text that is supposed to be spoken aloud
# reply = result['fulfillment']['speech']
# # Get what the service thought you said
# question = result['resolvedQuery']
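    # Send the resolved query to Wolfram Alpha and return the raw result list as text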
def perform(self, friday, request):
question = request['result']['resolvedQuery']
client = wolframalpha.Client(keys['WOLFRAM'])
res = client.query(question)
answer = str(list(res))
"""if len(res):
results = list(res.results)
if len(results):
answer = results[0].text[0]
else:
answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods
if each_answer.subpods[0].text])
else:
# answer = "Sorry, Wolfram doesn't know the answer."
answer = ""
"""
"""# Replace some of its notation so it's more easily read.
answer = answer.replace('\n', '. ').replace('~~', ' or about ')
# Get the result to a computation and don't bother reading the original question.
if '=' in answer:
answer = answer[answer.index('=') + 1:].strip()
"""
return answer
#
# def wolfram_query(question):
# # Every service should have a general set of requirements under which
# # it is activated, this would be one of the ones that Wolfram Alpha
# # uses, it does have others as well. Consider having a single method
# # in the plugin system that returns a boolean determining whether
# # a plugin should be activated.
# if question:
#
#
# def wolfram_query_old(question):
# import wolframalpha
# # Every service should have a general set of requirements under which
# # it is activated, this would be one of the ones that Wolfram Alpha
# # uses, it does have others as well. Consider having a single method
# # in the plugin system that returns a boolean determining whether
# # a plugin should be activated.
# if question.lower().startswith('wolfram'):
# question = question[8:]
# client = wolframalpha.Client(user_info.WOLFRAM_KEY)
# res = client.query(question)
# try:
# return next(res.results).text # This really needs to be changed.
# # I shouldn't have to rely upon error catching for my flow control.
# except StopIteration:
# pass
# try:
# answer = ' '.join([each_answer.text for each_answer in res.pods if each_answer])
# except TypeError:
# answer = None
# if not answer:
# answer = "Sorry, Wolfram doesn't know the answer."
#
# # Replace some of its notation so it's more easily read.
# answer = answer.replace('\n', '; ').replace('~~', ' or about ')
# # Get the result to a computation and don't bother reading the original question.
# if '=' in answer:
# answer = answer[answer.index('=')+1:]
# return [answer, None] # Follows answer format of [text, action]
#
|
normal
|
{
"blob_id": "57564c2e94a65187bf5e033ee06926fb593e11a7",
"index": 7733,
"step-1": "<mask token>\n\n\nclass Wolfram(IPlugin):\n\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result'\n ] and 'action' in request['result'] and request['result']['action'\n ] == 'wisdom.unknown'\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n",
"step-2": "<mask token>\nkeys_file.close()\n\n\nclass Wolfram(IPlugin):\n\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result'\n ] and 'action' in request['result'] and request['result']['action'\n ] == 'wisdom.unknown'\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n",
"step-3": "<mask token>\nkeys_file = open('friday/plugins/KEYS')\nkeys = yaml.load(keys_file)\nkeys_file.close()\n\n\nclass Wolfram(IPlugin):\n\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result'\n ] and 'action' in request['result'] and request['result']['action'\n ] == 'wisdom.unknown'\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n",
"step-4": "from yapsy.IPlugin import IPlugin\nimport wolframalpha\nimport yaml\nkeys_file = open('friday/plugins/KEYS')\nkeys = yaml.load(keys_file)\nkeys_file.close()\n\n\nclass Wolfram(IPlugin):\n\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result'\n ] and 'action' in request['result'] and request['result']['action'\n ] == 'wisdom.unknown'\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n",
"step-5": "from yapsy.IPlugin import IPlugin\nimport wolframalpha\nimport yaml\n\nkeys_file = open(\"friday/plugins/KEYS\")\nkeys = yaml.load(keys_file)\nkeys_file.close()\n\n\nclass Wolfram(IPlugin):\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result']\\\n and 'action' in request['result'] and request['result']['action'] == 'wisdom.unknown'\n # result = request['result'] # Assumes we're using gTTS\n # # Get the text that is supposed to be spoken aloud\n # reply = result['fulfillment']['speech']\n # # Get what the service thought you said\n # question = result['resolvedQuery']\n\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n\n#\n# def wolfram_query(question):\n# # Every service should have a general set of requirements under which\n# # it is activated, this would be one of the ones that Wolfram Alpha\n# # uses, it does have others as well. Consider having a single method\n# # in the plugin system that returns a boolean determining whether\n# # a plugin should be activated.\n# if question:\n#\n#\n# def wolfram_query_old(question):\n# import wolframalpha\n# # Every service should have a general set of requirements under which\n# # it is activated, this would be one of the ones that Wolfram Alpha\n# # uses, it does have others as well. Consider having a single method\n# # in the plugin system that returns a boolean determining whether\n# # a plugin should be activated.\n# if question.lower().startswith('wolfram'):\n# question = question[8:]\n# client = wolframalpha.Client(user_info.WOLFRAM_KEY)\n# res = client.query(question)\n# try:\n# return next(res.results).text # This really needs to be changed.\n# # I shouldn't have to rely upon error catching for my flow control.\n# except StopIteration:\n# pass\n# try:\n# answer = ' '.join([each_answer.text for each_answer in res.pods if each_answer])\n# except TypeError:\n# answer = None\n# if not answer:\n# answer = \"Sorry, Wolfram doesn't know the answer.\"\n#\n# # Replace some of its notation so it's more easily read.\n# answer = answer.replace('\\n', '; ').replace('~~', ' or about ')\n# # Get the result to a computation and don't bother reading the original question.\n# if '=' in answer:\n# answer = answer[answer.index('=')+1:]\n# return [answer, None] # Follows answer format of [text, action]\n#\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(0, number_files - N - 1, N):
img1 = cv2.imread('./frames/frame%d.jpg' % i, 0)
img2 = cv2.imread('./frames/frame%d.jpg' % (i + N), 0)
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
if len(keypoints) == 0:
keypoints.append(kp1)
keypoints.append(kp2)
else:
keypoints.append(kp2)
matches = flann.knnMatch(des1, des2, k=2)
print(i)
if len(matches):
good = []
for m, n in matches:
if m.distance < 0.6 * n.distance:
good.append(m)
avg = (len(kp1) + len(kp2)) / 2
if avg:
ratio = len(good) / float(avg)
else:
ratio = 0
else:
ratio = 0
similarity.append(ratio)
<|reserved_special_token_0|>
for i in range(1, n - 2):
if similarity[i] < similarity[i - 1] and similarity[i] < similarity[i + 1]:
t = i - 1
r = i + 1
while similarity[t] < similarity[t - 1]:
t = t - 1
if r < n - 2:
while similarity[r] < similarity[r + 1]:
r = r + 1
if similarity[i] < similarity[t] * T or similarity[i] < similarity[r
] * T:
boundaries.append(i)
<|reserved_special_token_0|>
for i in range(len(boundaries) - 2):
clip_start = int(boundaries[i]) * N / float(25)
clip_end = int(boundaries[i + 1]) * N / float(25)
clip = video.subclip(clip_start, clip_end)
clip.write_videofile('./output/shot_%s.mp4' % i)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
N = 1
sift = cv2.xfeatures2d.SIFT_create()
list = os.listdir('./frames')
number_files = len(list)
similarity = []
boundaries = []
keypoints = []
T = 0.5
index_params = dict(algorithm=0, trees=5)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)
for i in range(0, number_files - N - 1, N):
img1 = cv2.imread('./frames/frame%d.jpg' % i, 0)
img2 = cv2.imread('./frames/frame%d.jpg' % (i + N), 0)
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
if len(keypoints) == 0:
keypoints.append(kp1)
keypoints.append(kp2)
else:
keypoints.append(kp2)
matches = flann.knnMatch(des1, des2, k=2)
print(i)
if len(matches):
good = []
for m, n in matches:
if m.distance < 0.6 * n.distance:
good.append(m)
avg = (len(kp1) + len(kp2)) / 2
if avg:
ratio = len(good) / float(avg)
else:
ratio = 0
else:
ratio = 0
similarity.append(ratio)
n = len(similarity)
for i in range(1, n - 2):
if similarity[i] < similarity[i - 1] and similarity[i] < similarity[i + 1]:
t = i - 1
r = i + 1
while similarity[t] < similarity[t - 1]:
t = t - 1
if r < n - 2:
while similarity[r] < similarity[r + 1]:
r = r + 1
if similarity[i] < similarity[t] * T or similarity[i] < similarity[r
] * T:
boundaries.append(i)
video = VideoFileClip('test.mp4')
for i in range(len(boundaries) - 2):
clip_start = int(boundaries[i]) * N / float(25)
clip_end = int(boundaries[i + 1]) * N / float(25)
clip = video.subclip(clip_start, clip_end)
clip.write_videofile('./output/shot_%s.mp4' % i)
<|reserved_special_token_1|>
import numpy as np
import cv2
import os
from moviepy.editor import *
N = 1
sift = cv2.xfeatures2d.SIFT_create()
list = os.listdir('./frames')
number_files = len(list)
similarity = []
boundaries = []
keypoints = []
T = 0.5
index_params = dict(algorithm=0, trees=5)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)
for i in range(0, number_files - N - 1, N):
img1 = cv2.imread('./frames/frame%d.jpg' % i, 0)
img2 = cv2.imread('./frames/frame%d.jpg' % (i + N), 0)
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
if len(keypoints) == 0:
keypoints.append(kp1)
keypoints.append(kp2)
else:
keypoints.append(kp2)
matches = flann.knnMatch(des1, des2, k=2)
print(i)
if len(matches):
good = []
for m, n in matches:
if m.distance < 0.6 * n.distance:
good.append(m)
avg = (len(kp1) + len(kp2)) / 2
if avg:
ratio = len(good) / float(avg)
else:
ratio = 0
else:
ratio = 0
similarity.append(ratio)
n = len(similarity)
for i in range(1, n - 2):
if similarity[i] < similarity[i - 1] and similarity[i] < similarity[i + 1]:
t = i - 1
r = i + 1
while similarity[t] < similarity[t - 1]:
t = t - 1
if r < n - 2:
while similarity[r] < similarity[r + 1]:
r = r + 1
if similarity[i] < similarity[t] * T or similarity[i] < similarity[r
] * T:
boundaries.append(i)
video = VideoFileClip('test.mp4')
for i in range(len(boundaries) - 2):
clip_start = int(boundaries[i]) * N / float(25)
clip_end = int(boundaries[i + 1]) * N / float(25)
clip = video.subclip(clip_start, clip_end)
clip.write_videofile('./output/shot_%s.mp4' % i)
<|reserved_special_token_1|>
import numpy as np
import cv2
import os
from moviepy.editor import *
N = 1
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# count file number in folder frames
list = os.listdir('./frames')
number_files = len(list)
# array to store similarity of 2 consecutive frames
similarity = []
boundaries = []
keypoints = []
#threshold
T = 0.5
# open file to write result
# file = open("result.txt", "w")
index_params = dict(algorithm = 0, trees = 5)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)
# bf = cv2.BFMatcher()
# run loop
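# Compare each frame with the frame N steps ahead using SIFT keypoint matching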
for i in range(0, number_files-N-1, N):
img1 = cv2.imread('./frames/frame%d.jpg' %i, 0)
img2 = cv2.imread('./frames/frame%d.jpg' %(i+N), 0)
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
if(len(keypoints) == 0):
keypoints.append(kp1)
keypoints.append(kp2)
else:
keypoints.append(kp2)
matches = flann.knnMatch(des1, des2, k=2)
print(i)
# Apply ratio test
if len(matches):
good = []
for m,n in matches:
if m.distance < 0.6*n.distance:
good.append(m)
avg = (len(kp1) + len(kp2)) / 2
if avg:
ratio = len(good) / float(avg)
else:
ratio = 0
else:
ratio = 0
similarity.append(ratio)
n = len(similarity)
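# A boundary is a local minimum in similarity that falls below T times the neighbouring peaks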
for i in range(1, n-2):
if similarity[i] < similarity[i-1] and similarity[i] < similarity[i+1]:
t = i-1
r = i+1
while similarity[t] < similarity[t-1]: t = t-1
if r < n-2:
while similarity[r] < similarity[r+1]: r = r+1
if similarity[i] < similarity[t]*T or similarity[i] < similarity[r]*T:
# file.write(str(i) + "\n")
boundaries.append(i)
# file.close()
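# Cut the source video into shots at the detected boundaries (frame rate assumed to be 25 fps)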
video = VideoFileClip("test.mp4")
for i in range (len(boundaries)-2):
clip_start = int(boundaries[i]) * N / float(25)
clip_end = int(boundaries[i+1]) * N / float(25)
clip = video.subclip(clip_start, clip_end)
clip.write_videofile("./output/shot_%s.mp4" %i)
|
flexible
|
{
"blob_id": "397d9b1030a1ec08d04d2101f65a83547495b861",
"index": 7165,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, number_files - N - 1, N):\n img1 = cv2.imread('./frames/frame%d.jpg' % i, 0)\n img2 = cv2.imread('./frames/frame%d.jpg' % (i + N), 0)\n kp1, des1 = sift.detectAndCompute(img1, None)\n kp2, des2 = sift.detectAndCompute(img2, None)\n if len(keypoints) == 0:\n keypoints.append(kp1)\n keypoints.append(kp2)\n else:\n keypoints.append(kp2)\n matches = flann.knnMatch(des1, des2, k=2)\n print(i)\n if len(matches):\n good = []\n for m, n in matches:\n if m.distance < 0.6 * n.distance:\n good.append(m)\n avg = (len(kp1) + len(kp2)) / 2\n if avg:\n ratio = len(good) / float(avg)\n else:\n ratio = 0\n else:\n ratio = 0\n similarity.append(ratio)\n<mask token>\nfor i in range(1, n - 2):\n if similarity[i] < similarity[i - 1] and similarity[i] < similarity[i + 1]:\n t = i - 1\n r = i + 1\n while similarity[t] < similarity[t - 1]:\n t = t - 1\n if r < n - 2:\n while similarity[r] < similarity[r + 1]:\n r = r + 1\n if similarity[i] < similarity[t] * T or similarity[i] < similarity[r\n ] * T:\n boundaries.append(i)\n<mask token>\nfor i in range(len(boundaries) - 2):\n clip_start = int(boundaries[i]) * N / float(25)\n clip_end = int(boundaries[i + 1]) * N / float(25)\n clip = video.subclip(clip_start, clip_end)\n clip.write_videofile('./output/shot_%s.mp4' % i)\n",
"step-3": "<mask token>\nN = 1\nsift = cv2.xfeatures2d.SIFT_create()\nlist = os.listdir('./frames')\nnumber_files = len(list)\nsimilarity = []\nboundaries = []\nkeypoints = []\nT = 0.5\nindex_params = dict(algorithm=0, trees=5)\nsearch_params = dict()\nflann = cv2.FlannBasedMatcher(index_params, search_params)\nfor i in range(0, number_files - N - 1, N):\n img1 = cv2.imread('./frames/frame%d.jpg' % i, 0)\n img2 = cv2.imread('./frames/frame%d.jpg' % (i + N), 0)\n kp1, des1 = sift.detectAndCompute(img1, None)\n kp2, des2 = sift.detectAndCompute(img2, None)\n if len(keypoints) == 0:\n keypoints.append(kp1)\n keypoints.append(kp2)\n else:\n keypoints.append(kp2)\n matches = flann.knnMatch(des1, des2, k=2)\n print(i)\n if len(matches):\n good = []\n for m, n in matches:\n if m.distance < 0.6 * n.distance:\n good.append(m)\n avg = (len(kp1) + len(kp2)) / 2\n if avg:\n ratio = len(good) / float(avg)\n else:\n ratio = 0\n else:\n ratio = 0\n similarity.append(ratio)\nn = len(similarity)\nfor i in range(1, n - 2):\n if similarity[i] < similarity[i - 1] and similarity[i] < similarity[i + 1]:\n t = i - 1\n r = i + 1\n while similarity[t] < similarity[t - 1]:\n t = t - 1\n if r < n - 2:\n while similarity[r] < similarity[r + 1]:\n r = r + 1\n if similarity[i] < similarity[t] * T or similarity[i] < similarity[r\n ] * T:\n boundaries.append(i)\nvideo = VideoFileClip('test.mp4')\nfor i in range(len(boundaries) - 2):\n clip_start = int(boundaries[i]) * N / float(25)\n clip_end = int(boundaries[i + 1]) * N / float(25)\n clip = video.subclip(clip_start, clip_end)\n clip.write_videofile('./output/shot_%s.mp4' % i)\n",
"step-4": "import numpy as np\nimport cv2\nimport os\nfrom moviepy.editor import *\nN = 1\nsift = cv2.xfeatures2d.SIFT_create()\nlist = os.listdir('./frames')\nnumber_files = len(list)\nsimilarity = []\nboundaries = []\nkeypoints = []\nT = 0.5\nindex_params = dict(algorithm=0, trees=5)\nsearch_params = dict()\nflann = cv2.FlannBasedMatcher(index_params, search_params)\nfor i in range(0, number_files - N - 1, N):\n img1 = cv2.imread('./frames/frame%d.jpg' % i, 0)\n img2 = cv2.imread('./frames/frame%d.jpg' % (i + N), 0)\n kp1, des1 = sift.detectAndCompute(img1, None)\n kp2, des2 = sift.detectAndCompute(img2, None)\n if len(keypoints) == 0:\n keypoints.append(kp1)\n keypoints.append(kp2)\n else:\n keypoints.append(kp2)\n matches = flann.knnMatch(des1, des2, k=2)\n print(i)\n if len(matches):\n good = []\n for m, n in matches:\n if m.distance < 0.6 * n.distance:\n good.append(m)\n avg = (len(kp1) + len(kp2)) / 2\n if avg:\n ratio = len(good) / float(avg)\n else:\n ratio = 0\n else:\n ratio = 0\n similarity.append(ratio)\nn = len(similarity)\nfor i in range(1, n - 2):\n if similarity[i] < similarity[i - 1] and similarity[i] < similarity[i + 1]:\n t = i - 1\n r = i + 1\n while similarity[t] < similarity[t - 1]:\n t = t - 1\n if r < n - 2:\n while similarity[r] < similarity[r + 1]:\n r = r + 1\n if similarity[i] < similarity[t] * T or similarity[i] < similarity[r\n ] * T:\n boundaries.append(i)\nvideo = VideoFileClip('test.mp4')\nfor i in range(len(boundaries) - 2):\n clip_start = int(boundaries[i]) * N / float(25)\n clip_end = int(boundaries[i + 1]) * N / float(25)\n clip = video.subclip(clip_start, clip_end)\n clip.write_videofile('./output/shot_%s.mp4' % i)\n",
"step-5": "import numpy as np\nimport cv2\nimport os\nfrom moviepy.editor import *\n\nN = 1\n# Initiate SIFT detector\nsift = cv2.xfeatures2d.SIFT_create()\n\n# count file number in folder frames\nlist = os.listdir('./frames')\nnumber_files = len(list)\n\n# array to store similarity of 2 consecutive frames\nsimilarity = []\n\nboundaries = []\n\nkeypoints = []\n\n#threshold\nT = 0.5\n\n# open file to write result\n# file = open(\"result.txt\", \"w\")\n\nindex_params = dict(algorithm = 0, trees = 5)\nsearch_params = dict()\nflann = cv2.FlannBasedMatcher(index_params, search_params)\n# bf = cv2.BFMatcher()\n\n# run loop\nfor i in range(0, number_files-N-1, N):\n\n img1 = cv2.imread('./frames/frame%d.jpg' %i, 0) \n img2 = cv2.imread('./frames/frame%d.jpg' %(i+N), 0)\n\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(img1,None)\n kp2, des2 = sift.detectAndCompute(img2,None)\n\n if(len(keypoints) == 0):\n keypoints.append(kp1)\n keypoints.append(kp2)\n else:\n keypoints.append(kp2)\n \n matches = flann.knnMatch(des1, des2, k=2)\n print(i)\n # Apply ratio test\n if len(matches):\n good = []\n for m,n in matches:\n if m.distance < 0.6*n.distance:\n good.append(m)\n\n avg = (len(kp1) + len(kp2)) / 2\n if avg:\n ratio = len(good) / float(avg)\n else:\n ratio = 0\n else:\n ratio = 0\n \n similarity.append(ratio)\n\nn = len(similarity)\n\nfor i in range(1, n-2):\n if similarity[i] < similarity[i-1] and similarity[i] < similarity[i+1]:\n t = i-1\n r = i+1\n while similarity[t] < similarity[t-1]: t = t-1\n if r < n-2:\n while similarity[r] < similarity[r+1]: r = r+1\n \n if similarity[i] < similarity[t]*T or similarity[i] < similarity[r]*T: \n # file.write(str(i) + \"\\n\")\n boundaries.append(i)\n \n# file.close()\nvideo = VideoFileClip(\"test.mp4\")\nfor i in range (len(boundaries)-2):\n clip_start = int(boundaries[i]) * N / float(25)\n clip_end = int(boundaries[i+1]) * N / float(25)\n clip = video.subclip(clip_start, clip_end)\n clip.write_videofile(\"./output/shot_%s.mp4\" %i)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import doseresponse as dr
import numpy as np
import scipy.stats as st
import numpy.random as npr
import argparse
import itertools as it
# get rid of for real version
import pandas as pd
import os
seed = 1
npr.seed(seed)
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--samples", type=int, help="number of Hill and pIC50 samples for use in AP model",default=500)
parser.add_argument("-a", "--all", action='store_true', help='construct posterior predictive CDFs for Hill and pIC50 for all drugs and channels', default=False)
parser.add_argument("--num-cores", type=int, help="number of cores to parallelise drug/channel combinations",default=1)
parser.add_argument("-np", "--no-plots", action='store_true', help="don't make any plots, just save posterior predictive samples", default=False)
parser.add_argument("-tu", "--top-up", action='store_true', help="to use with --all, run on all drugs who don't already have MCMC files", default=False)
parser.add_argument("-sy", "--synthetic", action='store_true', help="use synthetic data (only one drug/channel combination exists currently", default=False)
parser.add_argument("-Ne", "--num_expts", type=int, help="how many experiments to fit to", default=0)
parser.add_argument("--data-file", type=str, help="csv file from which to read in data, in same format as provided crumb_data.csv")
args = parser.parse_args()
dr.setup(args.data_file)
drugs_to_run, channels_to_run = dr.list_drug_channel_options(args.all)
def construct_posterior_predictive_cdfs(alphas,betas,mus,ss):
num_x_pts = 501
hill_min = 0.
hill_max = 4.
pic50_min = -2.
pic50_max = 12.
hill_x_range = np.linspace(hill_min,hill_max,num_x_pts)
pic50_x_range = np.linspace(pic50_min,pic50_max,num_x_pts)
num_iterations = len(alphas) # assuming burn already discarded
hill_pdf_sum = np.zeros(num_x_pts)
hill_cdf_sum = np.zeros(num_x_pts)
pic50_pdf_sum = np.zeros(num_x_pts)
pic50_cdf_sum = np.zeros(num_x_pts)
fisk = st.fisk.cdf
fisk_pdf = st.fisk.pdf
logistic = st.logistic.cdf
logistic_pdf = st.logistic.pdf
for i in xrange(num_iterations):
hill_cdf_sum += fisk(hill_x_range,c=betas[i],scale=alphas[i],loc=0)
hill_pdf_sum += fisk_pdf(hill_x_range,c=betas[i],scale=alphas[i],loc=0)
pic50_cdf_sum += logistic(pic50_x_range,mus[i],ss[i])
pic50_pdf_sum += logistic_pdf(pic50_x_range,mus[i],ss[i])
hill_cdf_sum /= num_iterations
pic50_cdf_sum /= num_iterations
hill_pdf_sum /= num_iterations
pic50_pdf_sum /= num_iterations
return hill_x_range, hill_cdf_sum, pic50_x_range, pic50_cdf_sum, hill_pdf_sum, pic50_pdf_sum
def run(drug_channel):
drug, channel = drug_channel
print "\n\n{} + {}\n\n".format(drug,channel)
num_expts, experiment_numbers, experiments = dr.load_crumb_data(drug,channel)
if (0 < args.num_expts < num_expts):
num_expts = args.num_expts
save_samples_for_APs = False
else:
print "Fitting to all experiments\n"
save_samples_for_APs = True
drug, channel, output_dir, chain_dir, figs_dir, chain_file = dr.hierarchical_output_dirs_and_chain_file(drug,channel,num_expts)
try:
mcmc = np.loadtxt(chain_file,usecols=range(4))
except IOError:
print "tried loading", chain_file
print "No MCMC file found for {} + {}\n".format(drug,channel)
return None
total_iterations = mcmc.shape[0]
burn = total_iterations/4
mcmc = mcmc[burn:,:]
hill_x_range, hill_cdf_sum, pic50_x_range, pic50_cdf_sum, hill_pdf_sum, pic50_pdf_sum = construct_posterior_predictive_cdfs(mcmc[:,0],mcmc[:,1],mcmc[:,2],mcmc[:,3])
if (not args.no_plots):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
labels = ["Hill","pIC50"]
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(121)
ax1.plot(hill_x_range,hill_cdf_sum)
ax1.set_xlim(hill_x_range[0],hill_x_range[-1])
ax1.set_ylim(0,1)
ax1.set_xlabel("Hill")
ax1.set_ylabel("Cumulative distribution")
ax1.grid()
ax2 = fig.add_subplot(122,sharey=ax1)
ax2.plot(pic50_x_range,pic50_cdf_sum)
ax2.set_xlim(pic50_x_range[0],pic50_x_range[-1])
ax2.set_xlabel("pIC50")
ax2.grid()
plt.setp(ax2.get_yticklabels(), visible=False)
fig.tight_layout()
fig.savefig(figs_dir+"{}_{}_posterior_predictive_cdfs.png".format(drug,channel))
plt.close()
xs = [hill_x_range,pic50_x_range]
ys = [hill_pdf_sum,pic50_pdf_sum]
labels = ['$Hill$','$pIC50$']
file_labels = ['hill','pic50']
for i in xrange(2):
fig = plt.figure(figsize=(5,4))
ax = fig.add_subplot(111)
ax.plot(xs[i],ys[i],color='blue')
ax.grid()
ax.set_xlabel(labels[i])
ax.set_ylabel('Probability density')
ax.set_title('{} posterior predictive'.format(labels[i][1:-1]))
fig.tight_layout()
fig.savefig(figs_dir+"{}_{}_{}_posterior_predictive.png".format(drug,channel,file_labels[i]))
plt.close()
hill_cdf_file, pic50_cdf_file = dr.hierarchical_posterior_predictive_cdf_files(drug,channel,num_expts)
np.savetxt(hill_cdf_file,np.vstack((hill_x_range, hill_cdf_sum)).T)
np.savetxt(pic50_cdf_file,np.vstack((pic50_x_range, pic50_cdf_sum)).T)
hill_uniform_samples = npr.rand(args.samples)
pic50_uniform_samples = npr.rand(args.samples)
hill_interpolated_inverse_cdf_samples = np.interp(hill_uniform_samples,hill_cdf_sum,hill_x_range)
pic50_interpolated_inverse_cdf_samples = np.interp(pic50_uniform_samples,pic50_cdf_sum,pic50_x_range)
# save a number of MCMC samples for use in AP models
# we currently have it set to 500
# in theory, the more samples, the better the AP histograms will look!
if save_samples_for_APs:
samples_file = dr.hierarchical_hill_and_pic50_samples_for_AP_file(drug,channel)
with open(samples_file,'w') as outfile:
outfile.write('# {} samples of (Hill,pIC50) drawn from their posterior predictive distributions, as defined by MCMC samples\n'.format(args.samples))
np.savetxt(outfile,np.vstack((hill_interpolated_inverse_cdf_samples,pic50_interpolated_inverse_cdf_samples)).T)
print "\n{} + {} done!\n".format(drug,channel)
return None
drugs_channels = it.product(drugs_to_run,channels_to_run)
if (args.num_cores<=1) or (len(drugs_to_run)==1):
for drug_channel in drugs_channels:
#run(drug_channel)
# try/except is good when running multiple MCMCs and leaving them overnight,say
# if one or more crash then the others will survive!
# however, if you need more "control", comment out the try/except, and uncomment the other run(drug_channel) line
try:
run(drug_channel)
except Exception,e:
print e
print "Failed to run {} + {}!".format(drug_channel[0],drug_channel[1])
# run multiple MCMCs in parallel
elif (args.num_cores>1):
import multiprocessing as mp
num_cores = min(args.num_cores, mp.cpu_count()-1)
pool = mp.Pool(processes=num_cores)
pool.map_async(run,drugs_channels).get(9999999)
pool.close()
pool.join()
|
normal
|
{
"blob_id": "2f6baf4de40224f5a3d00ded35e751184ab59d0d",
"index": 9201,
"step-1": "import doseresponse as dr\nimport numpy as np\nimport scipy.stats as st\n\nimport numpy.random as npr\nimport argparse\nimport itertools as it\n\n# get rid of for real version\nimport pandas as pd\nimport os\n\nseed = 1\nnpr.seed(seed)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\", \"--samples\", type=int, help=\"number of Hill and pIC50 samples for use in AP model\",default=500)\nparser.add_argument(\"-a\", \"--all\", action='store_true', help='construct posterior predictive CDFs for Hill and pIC50 for all drugs and channels', default=False)\nparser.add_argument(\"--num-cores\", type=int, help=\"number of cores to parallelise drug/channel combinations\",default=1)\nparser.add_argument(\"-np\", \"--no-plots\", action='store_true', help=\"don't make any plots, just save posterior predictive samples\", default=False)\nparser.add_argument(\"-tu\", \"--top-up\", action='store_true', help=\"to use with --all, run on all drugs who don't already have MCMC files\", default=False)\nparser.add_argument(\"-sy\", \"--synthetic\", action='store_true', help=\"use synthetic data (only one drug/channel combination exists currently\", default=False)\nparser.add_argument(\"-Ne\", \"--num_expts\", type=int, help=\"how many experiments to fit to\", default=0)\nparser.add_argument(\"--data-file\", type=str, help=\"csv file from which to read in data, in same format as provided crumb_data.csv\")\n\nargs = parser.parse_args()\n\ndr.setup(args.data_file)\n\ndrugs_to_run, channels_to_run = dr.list_drug_channel_options(args.all)\n\ndef construct_posterior_predictive_cdfs(alphas,betas,mus,ss):\n num_x_pts = 501\n hill_min = 0.\n hill_max = 4.\n pic50_min = -2.\n pic50_max = 12.\n hill_x_range = np.linspace(hill_min,hill_max,num_x_pts)\n pic50_x_range = np.linspace(pic50_min,pic50_max,num_x_pts)\n num_iterations = len(alphas) # assuming burn already discarded\n hill_pdf_sum = np.zeros(num_x_pts)\n hill_cdf_sum = np.zeros(num_x_pts)\n pic50_pdf_sum = np.zeros(num_x_pts)\n pic50_cdf_sum = np.zeros(num_x_pts)\n fisk = st.fisk.cdf\n fisk_pdf = st.fisk.pdf\n logistic = st.logistic.cdf\n logistic_pdf = st.logistic.pdf\n for i in xrange(num_iterations):\n hill_cdf_sum += fisk(hill_x_range,c=betas[i],scale=alphas[i],loc=0)\n hill_pdf_sum += fisk_pdf(hill_x_range,c=betas[i],scale=alphas[i],loc=0)\n pic50_cdf_sum += logistic(pic50_x_range,mus[i],ss[i])\n pic50_pdf_sum += logistic_pdf(pic50_x_range,mus[i],ss[i])\n hill_cdf_sum /= num_iterations\n pic50_cdf_sum /= num_iterations\n hill_pdf_sum /= num_iterations\n pic50_pdf_sum /= num_iterations\n return hill_x_range, hill_cdf_sum, pic50_x_range, pic50_cdf_sum, hill_pdf_sum, pic50_pdf_sum\n\ndef run(drug_channel):\n\n drug, channel = drug_channel\n \n print \"\\n\\n{} + {}\\n\\n\".format(drug,channel)\n \n num_expts, experiment_numbers, experiments = dr.load_crumb_data(drug,channel)\n if (0 < args.num_expts < num_expts):\n num_expts = args.num_expts\n save_samples_for_APs = False\n else:\n print \"Fitting to all experiments\\n\"\n save_samples_for_APs = True\n \n \n drug, channel, output_dir, chain_dir, figs_dir, chain_file = dr.hierarchical_output_dirs_and_chain_file(drug,channel,num_expts)\n \n\n try:\n mcmc = np.loadtxt(chain_file,usecols=range(4))\n except IOError:\n print \"tried loading\", chain_file\n print \"No MCMC file found for {} + {}\\n\".format(drug,channel)\n return None\n total_iterations = mcmc.shape[0]\n burn = total_iterations/4\n mcmc = mcmc[burn:,:]\n \n \n\n hill_x_range, hill_cdf_sum, pic50_x_range, pic50_cdf_sum, 
hill_pdf_sum, pic50_pdf_sum = construct_posterior_predictive_cdfs(mcmc[:,0],mcmc[:,1],mcmc[:,2],mcmc[:,3])\n \n if (not args.no_plots):\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt \n labels = [\"Hill\",\"pIC50\"]\n fig = plt.figure(figsize=(8,4))\n ax1 = fig.add_subplot(121)\n ax1.plot(hill_x_range,hill_cdf_sum)\n ax1.set_xlim(hill_x_range[0],hill_x_range[-1])\n ax1.set_ylim(0,1)\n ax1.set_xlabel(\"Hill\")\n ax1.set_ylabel(\"Cumulative distribution\")\n ax1.grid()\n ax2 = fig.add_subplot(122,sharey=ax1)\n ax2.plot(pic50_x_range,pic50_cdf_sum)\n ax2.set_xlim(pic50_x_range[0],pic50_x_range[-1])\n ax2.set_xlabel(\"pIC50\")\n ax2.grid()\n plt.setp(ax2.get_yticklabels(), visible=False)\n fig.tight_layout()\n fig.savefig(figs_dir+\"{}_{}_posterior_predictive_cdfs.png\".format(drug,channel))\n plt.close()\n xs = [hill_x_range,pic50_x_range]\n ys = [hill_pdf_sum,pic50_pdf_sum]\n labels = ['$Hill$','$pIC50$']\n file_labels = ['hill','pic50']\n for i in xrange(2):\n fig = plt.figure(figsize=(5,4))\n ax = fig.add_subplot(111)\n ax.plot(xs[i],ys[i],color='blue')\n ax.grid()\n ax.set_xlabel(labels[i])\n ax.set_ylabel('Probability density')\n ax.set_title('{} posterior predictive'.format(labels[i][1:-1]))\n fig.tight_layout()\n fig.savefig(figs_dir+\"{}_{}_{}_posterior_predictive.png\".format(drug,channel,file_labels[i]))\n plt.close()\n\n hill_cdf_file, pic50_cdf_file = dr.hierarchical_posterior_predictive_cdf_files(drug,channel,num_expts)\n\n np.savetxt(hill_cdf_file,np.vstack((hill_x_range, hill_cdf_sum)).T)\n np.savetxt(pic50_cdf_file,np.vstack((pic50_x_range, pic50_cdf_sum)).T)\n\n\n hill_uniform_samples = npr.rand(args.samples)\n pic50_uniform_samples = npr.rand(args.samples)\n\n hill_interpolated_inverse_cdf_samples = np.interp(hill_uniform_samples,hill_cdf_sum,hill_x_range)\n pic50_interpolated_inverse_cdf_samples = np.interp(pic50_uniform_samples,pic50_cdf_sum,pic50_x_range)\n\n # save a number of MCMC samples for use in AP models\n # we currently have it set to 500\n # in theory, the more samples, the better the AP histograms will look!\n if save_samples_for_APs:\n samples_file = dr.hierarchical_hill_and_pic50_samples_for_AP_file(drug,channel)\n with open(samples_file,'w') as outfile:\n outfile.write('# {} samples of (Hill,pIC50) drawn from their posterior predictive distributions, as defined by MCMC samples\\n'.format(args.samples))\n np.savetxt(outfile,np.vstack((hill_interpolated_inverse_cdf_samples,pic50_interpolated_inverse_cdf_samples)).T)\n\n\n print \"\\n{} + {} done!\\n\".format(drug,channel)\n return None\n \ndrugs_channels = it.product(drugs_to_run,channels_to_run)\nif (args.num_cores<=1) or (len(drugs_to_run)==1):\n for drug_channel in drugs_channels:\n #run(drug_channel)\n \n # try/except is good when running multiple MCMCs and leaving them overnight,say\n # if one or more crash then the others will survive!\n # however, if you need more \"control\", comment out the try/except, and uncomment the other run(drug_channel) line\n try:\n run(drug_channel)\n except Exception,e:\n print e\n print \"Failed to run {} + {}!\".format(drug_channel[0],drug_channel[1])\n# run multiple MCMCs in parallel\nelif (args.num_cores>1):\n import multiprocessing as mp\n num_cores = min(args.num_cores, mp.cpu_count()-1)\n pool = mp.Pool(processes=num_cores)\n pool.map_async(run,drugs_channels).get(9999999)\n pool.close()\n pool.join()\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding:Utf-8 -*-
from .game_action_manager import GameActionManager
from .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager
|
normal
|
{
"blob_id": "48294209d51fbe4dfb2a5130311a10c8a1dd027c",
"index": 9237,
"step-1": "<mask token>\n",
"step-2": "from .game_action_manager import GameActionManager\nfrom .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager\n",
"step-3": "# -*- coding:Utf-8 -*-\n\n\nfrom .game_action_manager import GameActionManager\nfrom .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Rocket:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update(self, x, y, angle, leftPower, rightPower):
self.x = x * config.game['scale'] + config.game['width'] / 2
self.y = config.game['height'] - config.game['floorHeight'
] - y * config.game['scale']
self.angle = angle
self.angle = utils.wrapToPi(self.angle)
self.pl = leftPower
if self.pl < 0:
self.pl = 0
elif self.pl > 1:
self.pl = 1
self.pr = rightPower
if self.pr < 0:
self.pr = 0
elif self.pr > 1:
self.pr = 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Rocket:
def __init__(self):
self.x = config.initialPosition['x'] * config.game['scale'
] + config.game['width'] / 2
self.y = config.game['height'] - config.game['floorHeight'
] - config.initialPosition['y'] * config.game['scale']
self.angle = config.initialPosition['angle']
self.angle = utils.wrapToPi(self.angle)
self.dh = config.game['scale'] * config.rocket['height'] / 2
self.dw = config.game['scale'] * config.rocket['width'] / 2
self.pl = 0
self.pr = 0
<|reserved_special_token_0|>
def update(self, x, y, angle, leftPower, rightPower):
self.x = x * config.game['scale'] + config.game['width'] / 2
self.y = config.game['height'] - config.game['floorHeight'
] - y * config.game['scale']
self.angle = angle
self.angle = utils.wrapToPi(self.angle)
self.pl = leftPower
if self.pl < 0:
self.pl = 0
elif self.pl > 1:
self.pl = 1
self.pr = rightPower
if self.pr < 0:
self.pr = 0
elif self.pr > 1:
self.pr = 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Rocket:
def __init__(self):
self.x = config.initialPosition['x'] * config.game['scale'
] + config.game['width'] / 2
self.y = config.game['height'] - config.game['floorHeight'
] - config.initialPosition['y'] * config.game['scale']
self.angle = config.initialPosition['angle']
self.angle = utils.wrapToPi(self.angle)
self.dh = config.game['scale'] * config.rocket['height'] / 2
self.dw = config.game['scale'] * config.rocket['width'] / 2
self.pl = 0
self.pr = 0
def draw(self, display):
pSin = math.sin(self.angle)
pCos = math.cos(self.angle)
pygame.draw.polygon(display, config.colors['green'], [[self.x +
self.dw * pSin + self.dh * pCos, self.y + self.dw * pCos - self
.dh * pSin], [self.x - self.dw * pSin + self.dh * pCos, self.y -
self.dw * pCos - self.dh * pSin], [self.x - self.dw * pSin -
self.dh * pCos, self.y - self.dw * pCos + self.dh * pSin], [
self.x + self.dw * pSin - self.dh * pCos, self.y + self.dw *
pCos + self.dh * pSin]])
pygame.draw.polygon(display, config.colors['red'], [[self.x + (-
self.dh - self.dw * self.pl) * pCos + -self.dw / 2 * pSin, self
.y - (-self.dh - self.dw * self.pl) * pSin + -self.dw / 2 *
pCos], [self.x + -self.dh * pCos + -self.dw / 6 * pSin, self.y -
-self.dh * pSin + -self.dw / 6 * pCos], [self.x + -self.dh *
pCos + -5 * self.dw / 6 * pSin, self.y - -self.dh * pSin + -5 *
self.dw / 6 * pCos]])
pygame.draw.polygon(display, config.colors['red'], [[self.x + (-
self.dh - self.dw * self.pr) * pCos + self.dw / 2 * pSin, self.
y - (-self.dh - self.dw * self.pr) * pSin + self.dw / 2 * pCos],
[self.x + -self.dh * pCos + self.dw / 6 * pSin, self.y - -self.
dh * pSin + self.dw / 6 * pCos], [self.x + -self.dh * pCos + 5 *
self.dw / 6 * pSin, self.y - -self.dh * pSin + 5 * self.dw / 6 *
pCos]])
def update(self, x, y, angle, leftPower, rightPower):
self.x = x * config.game['scale'] + config.game['width'] / 2
self.y = config.game['height'] - config.game['floorHeight'
] - y * config.game['scale']
self.angle = angle
self.angle = utils.wrapToPi(self.angle)
self.pl = leftPower
if self.pl < 0:
self.pl = 0
elif self.pl > 1:
self.pl = 1
self.pr = rightPower
if self.pr < 0:
self.pr = 0
elif self.pr > 1:
self.pr = 1
<|reserved_special_token_1|>
import config
import math
import pygame
import utils
class Rocket:
def __init__(self):
self.x = config.initialPosition['x'] * config.game['scale'
] + config.game['width'] / 2
self.y = config.game['height'] - config.game['floorHeight'
] - config.initialPosition['y'] * config.game['scale']
self.angle = config.initialPosition['angle']
self.angle = utils.wrapToPi(self.angle)
self.dh = config.game['scale'] * config.rocket['height'] / 2
self.dw = config.game['scale'] * config.rocket['width'] / 2
self.pl = 0
self.pr = 0
def draw(self, display):
pSin = math.sin(self.angle)
pCos = math.cos(self.angle)
pygame.draw.polygon(display, config.colors['green'], [[self.x +
self.dw * pSin + self.dh * pCos, self.y + self.dw * pCos - self
.dh * pSin], [self.x - self.dw * pSin + self.dh * pCos, self.y -
self.dw * pCos - self.dh * pSin], [self.x - self.dw * pSin -
self.dh * pCos, self.y - self.dw * pCos + self.dh * pSin], [
self.x + self.dw * pSin - self.dh * pCos, self.y + self.dw *
pCos + self.dh * pSin]])
pygame.draw.polygon(display, config.colors['red'], [[self.x + (-
self.dh - self.dw * self.pl) * pCos + -self.dw / 2 * pSin, self
.y - (-self.dh - self.dw * self.pl) * pSin + -self.dw / 2 *
pCos], [self.x + -self.dh * pCos + -self.dw / 6 * pSin, self.y -
-self.dh * pSin + -self.dw / 6 * pCos], [self.x + -self.dh *
pCos + -5 * self.dw / 6 * pSin, self.y - -self.dh * pSin + -5 *
self.dw / 6 * pCos]])
pygame.draw.polygon(display, config.colors['red'], [[self.x + (-
self.dh - self.dw * self.pr) * pCos + self.dw / 2 * pSin, self.
y - (-self.dh - self.dw * self.pr) * pSin + self.dw / 2 * pCos],
[self.x + -self.dh * pCos + self.dw / 6 * pSin, self.y - -self.
dh * pSin + self.dw / 6 * pCos], [self.x + -self.dh * pCos + 5 *
self.dw / 6 * pSin, self.y - -self.dh * pSin + 5 * self.dw / 6 *
pCos]])
def update(self, x, y, angle, leftPower, rightPower):
self.x = x * config.game['scale'] + config.game['width'] / 2
self.y = config.game['height'] - config.game['floorHeight'
] - y * config.game['scale']
self.angle = angle
self.angle = utils.wrapToPi(self.angle)
self.pl = leftPower
if self.pl < 0:
self.pl = 0
elif self.pl > 1:
self.pl = 1
self.pr = rightPower
if self.pr < 0:
self.pr = 0
elif self.pr > 1:
self.pr = 1
<|reserved_special_token_1|>
import config
import math
import pygame
import utils
class Rocket:
def __init__(self):
self.x = config.initialPosition['x']*config.game['scale'] + config.game['width']/2;
self.y = config.game['height'] - config.game['floorHeight'] - config.initialPosition['y']*config.game['scale'];
self.angle = config.initialPosition['angle'];
self.angle = utils.wrapToPi(self.angle);
self.dh = config.game['scale']*config.rocket['height']/2; #half display height
self.dw = config.game['scale']*config.rocket['width']/2; # half display height
self.pl = 0 #left motor power
self.pr = 0 #right motor power
def draw(self, display):
pSin = math.sin(self.angle); # precalculated sin
pCos = math.cos(self.angle); # precalculated cos
#main body
pygame.draw.polygon(
display,
config.colors['green'],
[
[
self.x+self.dw*pSin+self.dh*pCos,
self.y+self.dw*pCos-self.dh*pSin,
], [
self.x-self.dw*pSin+self.dh*pCos,
self.y-self.dw*pCos-self.dh*pSin,
], [
self.x-self.dw*pSin-self.dh*pCos,
self.y-self.dw*pCos+self.dh*pSin,
], [
self.x+self.dw*pSin-self.dh*pCos,
self.y+self.dw*pCos+self.dh*pSin,
]
]
);
#left motor
pygame.draw.polygon(
display,
config.colors['red'],
[
[
self.x
+(-self.dh-self.dw*self.pl)*pCos
+(-self.dw/2)*pSin,
self.y
-(-self.dh-self.dw*self.pl)*pSin
+(-self.dw/2)*pCos,
],[
self.x
+(-self.dh)*pCos
+(-self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(-self.dw/6)*pCos,
],[
self.x
+(-self.dh)*pCos
+(-5*self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(-5*self.dw/6)*pCos,
]
]
)
#right motor
pygame.draw.polygon(
display,
config.colors['red'],
[
[
self.x
+(-self.dh-self.dw*self.pr)*pCos
+(self.dw/2)*pSin,
self.y
-(-self.dh-self.dw*self.pr)*pSin
+(self.dw/2)*pCos,
],[
self.x
+(-self.dh)*pCos
+(self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(self.dw/6)*pCos,
],[
self.x
+(-self.dh)*pCos
+(5*self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(5*self.dw/6)*pCos,
]
]
)
def update(self, x, y, angle, leftPower, rightPower):
self.x = x*config.game['scale'] + config.game['width']/2;
self.y = config.game['height'] - config.game['floorHeight'] - y*config.game['scale'];
self.angle = angle
self.angle = utils.wrapToPi(self.angle);
self.pl = leftPower;
if(self.pl<0):
self.pl = 0
elif self.pl>1:
self.pl = 1
self.pr = rightPower;
if(self.pr<0):
self.pr = 0
elif self.pr>1:
self.pr = 1
|
flexible
|
{
"blob_id": "7a1a9d2e773fb783d8522f1ea51e753d5d3782e9",
"index": 7517,
"step-1": "<mask token>\n\n\nclass Rocket:\n <mask token>\n <mask token>\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-2": "<mask token>\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n <mask token>\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-3": "<mask token>\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n\n def draw(self, display):\n pSin = math.sin(self.angle)\n pCos = math.cos(self.angle)\n pygame.draw.polygon(display, config.colors['green'], [[self.x + \n self.dw * pSin + self.dh * pCos, self.y + self.dw * pCos - self\n .dh * pSin], [self.x - self.dw * pSin + self.dh * pCos, self.y -\n self.dw * pCos - self.dh * pSin], [self.x - self.dw * pSin - \n self.dh * pCos, self.y - self.dw * pCos + self.dh * pSin], [\n self.x + self.dw * pSin - self.dh * pCos, self.y + self.dw *\n pCos + self.dh * pSin]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pl) * pCos + -self.dw / 2 * pSin, self\n .y - (-self.dh - self.dw * self.pl) * pSin + -self.dw / 2 *\n pCos], [self.x + -self.dh * pCos + -self.dw / 6 * pSin, self.y -\n -self.dh * pSin + -self.dw / 6 * pCos], [self.x + -self.dh *\n pCos + -5 * self.dw / 6 * pSin, self.y - -self.dh * pSin + -5 *\n self.dw / 6 * pCos]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pr) * pCos + self.dw / 2 * pSin, self.\n y - (-self.dh - self.dw * self.pr) * pSin + self.dw / 2 * pCos],\n [self.x + -self.dh * pCos + self.dw / 6 * pSin, self.y - -self.\n dh * pSin + self.dw / 6 * pCos], [self.x + -self.dh * pCos + 5 *\n self.dw / 6 * pSin, self.y - -self.dh * pSin + 5 * self.dw / 6 *\n pCos]])\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-4": "import config\nimport math\nimport pygame\nimport utils\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n\n def draw(self, display):\n pSin = math.sin(self.angle)\n pCos = math.cos(self.angle)\n pygame.draw.polygon(display, config.colors['green'], [[self.x + \n self.dw * pSin + self.dh * pCos, self.y + self.dw * pCos - self\n .dh * pSin], [self.x - self.dw * pSin + self.dh * pCos, self.y -\n self.dw * pCos - self.dh * pSin], [self.x - self.dw * pSin - \n self.dh * pCos, self.y - self.dw * pCos + self.dh * pSin], [\n self.x + self.dw * pSin - self.dh * pCos, self.y + self.dw *\n pCos + self.dh * pSin]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pl) * pCos + -self.dw / 2 * pSin, self\n .y - (-self.dh - self.dw * self.pl) * pSin + -self.dw / 2 *\n pCos], [self.x + -self.dh * pCos + -self.dw / 6 * pSin, self.y -\n -self.dh * pSin + -self.dw / 6 * pCos], [self.x + -self.dh *\n pCos + -5 * self.dw / 6 * pSin, self.y - -self.dh * pSin + -5 *\n self.dw / 6 * pCos]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pr) * pCos + self.dw / 2 * pSin, self.\n y - (-self.dh - self.dw * self.pr) * pSin + self.dw / 2 * pCos],\n [self.x + -self.dh * pCos + self.dw / 6 * pSin, self.y - -self.\n dh * pSin + self.dw / 6 * pCos], [self.x + -self.dh * pCos + 5 *\n self.dw / 6 * pSin, self.y - -self.dh * pSin + 5 * self.dw / 6 *\n pCos]])\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-5": "import config\nimport math\nimport pygame\nimport utils\n\nclass Rocket:\n\tdef __init__(self):\n\t\tself.x = config.initialPosition['x']*config.game['scale'] + config.game['width']/2;\n\t\tself.y = config.game['height'] - config.game['floorHeight'] - config.initialPosition['y']*config.game['scale'];\n\n\t\tself.angle = config.initialPosition['angle'];\n\t\tself.angle = utils.wrapToPi(self.angle);\n\t\tself.dh = config.game['scale']*config.rocket['height']/2; #half display height\n\t\tself.dw = config.game['scale']*config.rocket['width']/2; # half display height\n\t\tself.pl = 0 #left motor power\n\t\tself.pr = 0 #right motor power\n\n\tdef draw(self, display):\n\t\tpSin = math.sin(self.angle); # precalculated sin\n\t\tpCos = math.cos(self.angle); # precalculated cos\n\t\t\n\t\t#main body\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['green'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x+self.dw*pSin+self.dh*pCos,\n\t\t\t\t\tself.y+self.dw*pCos-self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x-self.dw*pSin+self.dh*pCos,\n\t\t\t\t\tself.y-self.dw*pCos-self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x-self.dw*pSin-self.dh*pCos,\n\t\t\t\t\tself.y-self.dw*pCos+self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x+self.dw*pSin-self.dh*pCos,\n\t\t\t\t\tself.y+self.dw*pCos+self.dh*pSin,\n\t\t\t\t]\n\t\t\t]\n\t\t\n\t\t);\n\n\t\t#left motor\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['red'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh-self.dw*self.pl)*pCos\n\t\t\t\t\t+(-self.dw/2)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh-self.dw*self.pl)*pSin\n\t\t\t\t\t+(-self.dw/2)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(-self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(-self.dw/6)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(-5*self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(-5*self.dw/6)*pCos,\n\t\t\t\t]\n\n\t\t\t]\n\t\t)\n\n\t\t#right motor\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['red'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh-self.dw*self.pr)*pCos\n\t\t\t\t\t+(self.dw/2)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh-self.dw*self.pr)*pSin\n\t\t\t\t\t+(self.dw/2)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(self.dw/6)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(5*self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(5*self.dw/6)*pCos,\n\t\t\t\t]\n\n\t\t\t]\n\t\t)\n\n\tdef update(self, x, y, angle, leftPower, rightPower):\n\t\tself.x = x*config.game['scale'] + config.game['width']/2;\n\t\tself.y = config.game['height'] - config.game['floorHeight'] - y*config.game['scale'];\n\n\t\tself.angle = angle\n\t\tself.angle = utils.wrapToPi(self.angle);\n\n\t\tself.pl = leftPower;\n\t\tif(self.pl<0):\n\t\t\tself.pl = 0\n\t\telif self.pl>1:\n\t\t\tself.pl = 1\n\n\t\tself.pr = rightPower;\n\t\tif(self.pr<0):\n\t\t\tself.pr = 0\n\t\telif self.pr>1:\n\t\t\tself.pr = 1\n\n\t\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class GraphPickleWriter(GraphWriter):
<|reserved_special_token_0|>
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],
tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'
) as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'
) as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'
) as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'
) as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],
tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'
) as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'
) as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'
) as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'
) as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['GraphPickleWriter']
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],
tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'
) as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'
) as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'
) as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'
) as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import pickle
from typing import Mapping
from openbiolink.edge import Edge
from openbiolink.graph_creation.graph_writer.base import GraphWriter
__all__ = ['GraphPickleWriter']
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],
tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'
) as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'
) as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'
) as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'
) as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'
) as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
<|reserved_special_token_1|>
"""A utility for outputting graphs as pickle files.
To test, run ``openbiolink generate --no-download --no-input --output-format pickle --qual hq``.
"""
import os
import pickle
from typing import Mapping
from openbiolink.edge import Edge
from openbiolink.graph_creation.graph_writer.base import GraphWriter
__all__ = [
"GraphPickleWriter",
]
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge], tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, "tp_nodes.pkl"), "wb") as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tp_edges.pkl"), "wb") as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tp_namespaces.pkl"), "wb") as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_nodes.pkl"), "wb") as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_edges.pkl"), "wb") as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_namespaces.pkl"), "wb") as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
|
flexible
|
{
"blob_id": "58d069f6700149793c3446bdd4677f08eaf301ee",
"index": 670,
"step-1": "<mask token>\n\n\nclass GraphPickleWriter(GraphWriter):\n <mask token>\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],\n tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-2": "<mask token>\n\n\nclass GraphPickleWriter(GraphWriter):\n format_key = 'PICKLE'\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],\n tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-3": "<mask token>\n__all__ = ['GraphPickleWriter']\n\n\nclass GraphPickleWriter(GraphWriter):\n format_key = 'PICKLE'\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],\n tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-4": "<mask token>\nimport os\nimport pickle\nfrom typing import Mapping\nfrom openbiolink.edge import Edge\nfrom openbiolink.graph_creation.graph_writer.base import GraphWriter\n__all__ = ['GraphPickleWriter']\n\n\nclass GraphPickleWriter(GraphWriter):\n format_key = 'PICKLE'\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],\n tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-5": "\"\"\"A utility for outputting graphs as pickle files.\n\nTo test, run ``openbiolink generate --no-download --no-input --output-format pickle --qual hq``.\n\"\"\"\n\nimport os\nimport pickle\nfrom typing import Mapping\n\nfrom openbiolink.edge import Edge\nfrom openbiolink.graph_creation.graph_writer.base import GraphWriter\n\n__all__ = [\n \"GraphPickleWriter\",\n]\n\n\nclass GraphPickleWriter(GraphWriter):\n format_key = 'PICKLE'\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge], tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, \"tp_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_edges.pkl\"), \"wb\") as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_edges.pkl\"), \"wb\") as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
def sort_descending(numbers):
numbers.sort(reverse=True)
|
normal
|
{
"blob_id": "46dc9917d9b3a7caf8d7ba5024b17d3b755fc5db",
"index": 7278,
"step-1": "<mask token>\n",
"step-2": "def sort_descending(numbers):\n numbers.sort(reverse=True)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False,
activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False,
activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
if layer_depth % 3 == 0:
strides = 2
else:
strides = 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,
'same', use_bias=False, activation=None)
if batch_norm:
conv_layer = tf.layers.batch_normalization(conv_layer, training=
is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
<|reserved_special_token_0|>
for layer_i in range(1, 1 + layer_num):
layer = conv_layer(layer, layer_i, batch_norm, is_training)
<|reserved_special_token_0|>
tf.summary.scalar('conv_loss', model_loss)
if batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(
model_loss)
<|reserved_special_token_0|>
with tf.Session() as sess:
merged = tf.summary.merge_all()
if batch_norm:
logdir = 'mnist/conv/SGD_batchnorm'
else:
logdir = 'mnist/conv/SGD_no_batchnorm'
writer = tf.summary.FileWriter(logdir, sess.graph)
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, summary = sess.run([train_opt, merged], {inputs: batch_xs,
labels: batch_ys, is_training: True})
writer.add_summary(summary, batch_i)
if batch_i % 500 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.
validation.images, labels: mnist.validation.labels,
is_training: False})
print(
'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
elif batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,
labels: batch_ys, is_training: False})
print(
'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:
mnist.validation.labels, is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test
.labels, is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True, reshape=False)
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False,
activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
if layer_depth % 3 == 0:
strides = 2
else:
strides = 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,
'same', use_bias=False, activation=None)
if batch_norm:
conv_layer = tf.layers.batch_normalization(conv_layer, training=
is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
num_batches = 3000
batch_size = 128
learning_rate = 0.002
layer_num = 5
batch_norm = True
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)
layer = inputs
for layer_i in range(1, 1 + layer_num):
layer = conv_layer(layer, layer_i, batch_norm, is_training)
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] *
orig_shape[3]])
layer = fully_connected(layer, 100, batch_norm, is_training)
logits = tf.layers.dense(layer, 10)
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=
logits, labels=labels))
tf.summary.scalar('conv_loss', model_loss)
if batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(
model_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
merged = tf.summary.merge_all()
if batch_norm:
logdir = 'mnist/conv/SGD_batchnorm'
else:
logdir = 'mnist/conv/SGD_no_batchnorm'
writer = tf.summary.FileWriter(logdir, sess.graph)
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, summary = sess.run([train_opt, merged], {inputs: batch_xs,
labels: batch_ys, is_training: True})
writer.add_summary(summary, batch_i)
if batch_i % 500 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.
validation.images, labels: mnist.validation.labels,
is_training: False})
print(
'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
elif batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,
labels: batch_ys, is_training: False})
print(
'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:
mnist.validation.labels, is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test
.labels, is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
<|reserved_special_token_1|>
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True, reshape=False)
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False,
activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
if layer_depth % 3 == 0:
strides = 2
else:
strides = 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,
'same', use_bias=False, activation=None)
if batch_norm:
conv_layer = tf.layers.batch_normalization(conv_layer, training=
is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
num_batches = 3000
batch_size = 128
learning_rate = 0.002
layer_num = 5
batch_norm = True
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)
layer = inputs
for layer_i in range(1, 1 + layer_num):
layer = conv_layer(layer, layer_i, batch_norm, is_training)
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] *
orig_shape[3]])
layer = fully_connected(layer, 100, batch_norm, is_training)
logits = tf.layers.dense(layer, 10)
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=
logits, labels=labels))
tf.summary.scalar('conv_loss', model_loss)
if batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(
model_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
merged = tf.summary.merge_all()
if batch_norm:
logdir = 'mnist/conv/SGD_batchnorm'
else:
logdir = 'mnist/conv/SGD_no_batchnorm'
writer = tf.summary.FileWriter(logdir, sess.graph)
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, summary = sess.run([train_opt, merged], {inputs: batch_xs,
labels: batch_ys, is_training: True})
writer.add_summary(summary, batch_i)
if batch_i % 500 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.
validation.images, labels: mnist.validation.labels,
is_training: False})
print(
'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
elif batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,
labels: batch_ys, is_training: False})
print(
'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:
mnist.validation.labels, is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test
.labels, is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
<|reserved_special_token_1|>
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
if layer_depth % 3 == 0:
strides = 2
else:
strides = 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)
if batch_norm:
conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
num_batches = 3000
batch_size = 128
learning_rate = 0.002
layer_num = 5
batch_norm = True
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)
layer = inputs
for layer_i in range(1, 1+layer_num):
layer = conv_layer(layer, layer_i, batch_norm, is_training)
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
layer = fully_connected(layer, 100, batch_norm, is_training)
logits = tf.layers.dense(layer, 10)
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
tf.summary.scalar('conv_loss',model_loss)
if batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
#train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)
        #train_opt = tf.train.RMSPropOptimizer(learning_rate).minimize(model_loss)
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)
    #train_opt = tf.train.RMSPropOptimizer(learning_rate).minimize(model_loss)
#train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
merged = tf.summary.merge_all()
if batch_norm:
logdir = "mnist/conv/SGD_batchnorm"
else:
logdir = "mnist/conv/SGD_no_batchnorm"
writer = tf.summary.FileWriter(logdir, sess.graph)
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_,summary = sess.run([train_opt,merged], {inputs: batch_xs, labels: batch_ys, is_training: True})
writer.add_summary(summary, batch_i)
if batch_i % 500 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels, is_training: False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels,is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels,is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
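The loss above is built with sigmoid cross-entropy over one-hot labels; since the ten MNIST classes are mutually exclusive, softmax cross-entropy is the more conventional choice. A minimal sketch of that variant, assuming the same TF 1.x graph (logits and labels as defined above); this is an alternative, not part of the original script:

# Hedged alternative to the sigmoid-based loss above (TF 1.x API); logits/labels unchanged.
model_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels))
tf.summary.scalar('conv_loss', model_loss)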
|
flexible
|
{
"blob_id": "17b3f51779bda5a48c4d77c35d6bbdd2aadb13cd",
"index": 1432,
"step-1": "<mask token>\n\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False,\n activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False,\n activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\n\ndef conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):\n if layer_depth % 3 == 0:\n strides = 2\n else:\n strides = 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,\n 'same', use_bias=False, activation=None)\n if batch_norm:\n conv_layer = tf.layers.batch_normalization(conv_layer, training=\n is_training)\n conv_layer = tf.nn.relu(conv_layer)\n return conv_layer\n\n\n<mask token>\nfor layer_i in range(1, 1 + layer_num):\n layer = conv_layer(layer, layer_i, batch_norm, is_training)\n<mask token>\ntf.summary.scalar('conv_loss', model_loss)\nif batch_norm:\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\nelse:\n train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n model_loss)\n<mask token>\nwith tf.Session() as sess:\n merged = tf.summary.merge_all()\n if batch_norm:\n logdir = 'mnist/conv/SGD_batchnorm'\n else:\n logdir = 'mnist/conv/SGD_no_batchnorm'\n writer = tf.summary.FileWriter(logdir, sess.graph)\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n _, summary = sess.run([train_opt, merged], {inputs: batch_xs,\n labels: batch_ys, is_training: True})\n writer.add_summary(summary, batch_i)\n if batch_i % 500 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.\n validation.images, labels: mnist.validation.labels,\n is_training: False})\n print(\n 'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n elif batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,\n labels: batch_ys, is_training: False})\n print(\n 'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:\n mnist.validation.labels, is_training: False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test\n .labels, is_training: False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n",
"step-3": "<mask token>\nmnist = input_data.read_data_sets('MNIST_data/', one_hot=True, reshape=False)\n\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False,\n activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\n\ndef conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):\n if layer_depth % 3 == 0:\n strides = 2\n else:\n strides = 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,\n 'same', use_bias=False, activation=None)\n if batch_norm:\n conv_layer = tf.layers.batch_normalization(conv_layer, training=\n is_training)\n conv_layer = tf.nn.relu(conv_layer)\n return conv_layer\n\n\nnum_batches = 3000\nbatch_size = 128\nlearning_rate = 0.002\nlayer_num = 5\nbatch_norm = True\ninputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\nlabels = tf.placeholder(tf.float32, [None, 10])\nis_training = tf.placeholder(tf.bool)\nlayer = inputs\nfor layer_i in range(1, 1 + layer_num):\n layer = conv_layer(layer, layer_i, batch_norm, is_training)\norig_shape = layer.get_shape().as_list()\nlayer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] *\n orig_shape[3]])\nlayer = fully_connected(layer, 100, batch_norm, is_training)\nlogits = tf.layers.dense(layer, 10)\nmodel_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=labels))\ntf.summary.scalar('conv_loss', model_loss)\nif batch_norm:\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\nelse:\n train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n model_loss)\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nwith tf.Session() as sess:\n merged = tf.summary.merge_all()\n if batch_norm:\n logdir = 'mnist/conv/SGD_batchnorm'\n else:\n logdir = 'mnist/conv/SGD_no_batchnorm'\n writer = tf.summary.FileWriter(logdir, sess.graph)\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n _, summary = sess.run([train_opt, merged], {inputs: batch_xs,\n labels: batch_ys, is_training: True})\n writer.add_summary(summary, batch_i)\n if batch_i % 500 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.\n validation.images, labels: mnist.validation.labels,\n is_training: False})\n print(\n 'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n elif batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,\n labels: batch_ys, is_training: False})\n print(\n 'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:\n mnist.validation.labels, is_training: False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test\n .labels, is_training: False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n",
"step-4": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data/', one_hot=True, reshape=False)\n\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False,\n activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\n\ndef conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):\n if layer_depth % 3 == 0:\n strides = 2\n else:\n strides = 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,\n 'same', use_bias=False, activation=None)\n if batch_norm:\n conv_layer = tf.layers.batch_normalization(conv_layer, training=\n is_training)\n conv_layer = tf.nn.relu(conv_layer)\n return conv_layer\n\n\nnum_batches = 3000\nbatch_size = 128\nlearning_rate = 0.002\nlayer_num = 5\nbatch_norm = True\ninputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\nlabels = tf.placeholder(tf.float32, [None, 10])\nis_training = tf.placeholder(tf.bool)\nlayer = inputs\nfor layer_i in range(1, 1 + layer_num):\n layer = conv_layer(layer, layer_i, batch_norm, is_training)\norig_shape = layer.get_shape().as_list()\nlayer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] *\n orig_shape[3]])\nlayer = fully_connected(layer, 100, batch_norm, is_training)\nlogits = tf.layers.dense(layer, 10)\nmodel_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=labels))\ntf.summary.scalar('conv_loss', model_loss)\nif batch_norm:\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\nelse:\n train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n model_loss)\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nwith tf.Session() as sess:\n merged = tf.summary.merge_all()\n if batch_norm:\n logdir = 'mnist/conv/SGD_batchnorm'\n else:\n logdir = 'mnist/conv/SGD_no_batchnorm'\n writer = tf.summary.FileWriter(logdir, sess.graph)\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n _, summary = sess.run([train_opt, merged], {inputs: batch_xs,\n labels: batch_ys, is_training: True})\n writer.add_summary(summary, batch_i)\n if batch_i % 500 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.\n validation.images, labels: mnist.validation.labels,\n is_training: False})\n print(\n 'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n elif batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,\n labels: batch_ys, is_training: False})\n print(\n 'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'\n .format(batch_i, loss, acc))\n acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:\n mnist.validation.labels, is_training: False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test\n .labels, is_training: False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n",
"step-5": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True, reshape=False)\n\ndef fully_connected(prev_layer, num_units, batch_norm, is_training=False):\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)\n if batch_norm:\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer\n\ndef conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):\n\tif layer_depth % 3 == 0:\n\t strides = 2\n\telse:\n\t\tstrides = 1\n\tconv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)\n\tif batch_norm:\n\t\tconv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)\n\tconv_layer = tf.nn.relu(conv_layer)\n\treturn conv_layer\n\n\nnum_batches = 3000\nbatch_size = 128\nlearning_rate = 0.002\nlayer_num = 5\nbatch_norm = True\n\ninputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\nlabels = tf.placeholder(tf.float32, [None, 10])\nis_training = tf.placeholder(tf.bool)\n\nlayer = inputs\nfor layer_i in range(1, 1+layer_num):\n layer = conv_layer(layer, layer_i, batch_norm, is_training)\n\norig_shape = layer.get_shape().as_list()\n\nlayer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])\nlayer = fully_connected(layer, 100, batch_norm, is_training)\n\nlogits = tf.layers.dense(layer, 10)\nmodel_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\ntf.summary.scalar('conv_loss',model_loss)\n\nif batch_norm: \n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n #train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)\n\t\t#train_opt = tf.train.RMSPropOptimize(learning_rate).minimize(model_loss)\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\nelse:\n train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)\n\t#train_opt = tf.train.RMSPropOptimize(learning_rate).minimize(model_loss)\n\t#train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\n\ncorrect_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n\nwith tf.Session() as sess:\n\tmerged = tf.summary.merge_all()\n\tif batch_norm: \n\t\tlogdir = \"mnist/conv/SGD_batchnorm\"\n\telse:\n\t\tlogdir = \"mnist/conv/SGD_no_batchnorm\"\n\twriter = tf.summary.FileWriter(logdir, sess.graph)\n\n\tsess.run(tf.global_variables_initializer())\n\tfor batch_i in range(num_batches):\n\t\tbatch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n\t\t_,summary = sess.run([train_opt,merged], {inputs: batch_xs, labels: batch_ys, is_training: True})\n\t\t\n\t\twriter.add_summary(summary, batch_i)\n\n\t\tif batch_i % 500 == 0:\n\t\t\tloss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels, is_training: False})\n\t\t\tprint('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\t\telif batch_i % 100 == 0:\n\t\t\tloss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})\n\t\t\tprint('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\n\tacc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels,is_training: False})\n\tprint('Final 
validation accuracy: {:>3.5f}'.format(acc))\n\tacc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels,is_training: False})\n\tprint('Final test accuracy: {:>3.5f}'.format(acc))",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Cigarette(models.Model):
<|reserved_special_token_0|>
user = models.ForeignKey(user, blank=False, null=False, related_name=
'user_cigarettes')
cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)
cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)
class Meta:
verbose_name = _('cigarette')
verbose_name_plural = _('cigarettes')
def __unicode__(self):
return u'%s' % self.pk
def get_cigarette_user_id(self):
"""Returns the user id who smoked the cigarette"""
        return self.user.pk
def get_date(self):
"""Returns the date associated to the cigarette"""
return self.cigarette_date
def get_time(self):
"""Returns the time associated to the cigarette"""
return self.cigarette_time
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Comment(TimeStampedModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
verbose_name = _('comment')
verbose_name_plural = _('comments')
def __unicode__(self):
return self.content
<|reserved_special_token_0|>
def get_user_id(self):
"""Returns the id of the user who posted the comment"""
        return self.user.pk
<|reserved_special_token_0|>
def get_parent_comment_id(self):
"""Returns the id of the parent comment"""
        return self.starting_comment.pk
    def set_parent_comment(self, parent_comment):
self.starting_comment = parent_comment
class Cigarette(models.Model):
"""
Cigarette smoked by a user
"""
user = models.ForeignKey(user, blank=False, null=False, related_name=
'user_cigarettes')
cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)
cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)
class Meta:
verbose_name = _('cigarette')
verbose_name_plural = _('cigarettes')
def __unicode__(self):
return u'%s' % self.pk
def get_cigarette_user_id(self):
"""Returns the user id who smoked the cigarette"""
        return self.user.pk
def get_date(self):
"""Returns the date associated to the cigarette"""
return self.cigarette_date
def get_time(self):
"""Returns the time associated to the cigarette"""
return self.cigarette_time
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Comment(TimeStampedModel):
"""
Text comment posted by users
"""
user = models.ForeignKey(user, blank=False, null=False, related_name=
'comment_user')
starting_comment = models.ForeignKey('Comment', blank=True, null=True,
related_name='parent_comment')
content = models.TextField(_('comment text'), max_length=
commment_lenght, blank=False, null=False)
class Meta:
verbose_name = _('comment')
verbose_name_plural = _('comments')
def __unicode__(self):
return self.content
def get_content(self):
"""Returns the text content for the comment"""
return self.content
def get_user_id(self):
"""Returns the id of the user who posted the comment"""
        return self.user.pk
def get_date(self):
"""Returns the timestamp associated to the comment"""
return self.created
def get_parent_comment_id(self):
"""Returns the id of the parent comment"""
        return self.starting_comment.pk
    def set_parent_comment(self, parent_comment):
self.starting_comment = parent_comment
class Cigarette(models.Model):
"""
Cigarette smoked by a user
"""
user = models.ForeignKey(user, blank=False, null=False, related_name=
'user_cigarettes')
cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)
cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)
class Meta:
verbose_name = _('cigarette')
verbose_name_plural = _('cigarettes')
def __unicode__(self):
return u'%s' % self.pk
def get_cigarette_user_id(self):
"""Returns the user id who smoked the cigarette"""
        return self.user.pk
def get_date(self):
"""Returns the date associated to the cigarette"""
return self.cigarette_date
def get_time(self):
"""Returns the time associated to the cigarette"""
return self.cigarette_time
<|reserved_special_token_1|>
<|reserved_special_token_0|>
user = settings.AUTH_USER_MODEL
commment_lenght = settings.COMMENT_LENGTH
class Comment(TimeStampedModel):
"""
Text comment posted by users
"""
user = models.ForeignKey(user, blank=False, null=False, related_name=
'comment_user')
starting_comment = models.ForeignKey('Comment', blank=True, null=True,
related_name='parent_comment')
content = models.TextField(_('comment text'), max_length=
commment_lenght, blank=False, null=False)
class Meta:
verbose_name = _('comment')
verbose_name_plural = _('comments')
def __unicode__(self):
return self.content
def get_content(self):
"""Returns the text content for the comment"""
return self.content
def get_user_id(self):
"""Returns the id of the user who posted the comment"""
        return self.user.pk
def get_date(self):
"""Returns the timestamp associated to the comment"""
return self.created
def get_parent_comment_id(self):
"""Returns the id of the parent comment"""
        return self.starting_comment.pk
    def set_parent_comment(self, parent_comment):
self.starting_comment = parent_comment
class Cigarette(models.Model):
"""
Cigarette smoked by a user
"""
user = models.ForeignKey(user, blank=False, null=False, related_name=
'user_cigarettes')
cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)
cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)
class Meta:
verbose_name = _('cigarette')
verbose_name_plural = _('cigarettes')
def __unicode__(self):
return u'%s' % self.pk
def get_cigarette_user_id(self):
"""Returns the user id who smoked the cigarette"""
        return self.user.pk
def get_date(self):
"""Returns the date associated to the cigarette"""
return self.cigarette_date
def get_time(self):
"""Returns the time associated to the cigarette"""
return self.cigarette_time
<|reserved_special_token_1|>
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
user = settings.AUTH_USER_MODEL
commment_lenght = settings.COMMENT_LENGTH
# Entity Comment
class Comment(TimeStampedModel):
"""
Text comment posted by users
"""
# User - Foreign key
user = models.ForeignKey(user, blank=False, null=False, related_name='comment_user')
# Parent comment (optional) - i.e. a comment of a comment
starting_comment = models.ForeignKey('Comment', blank=True, null=True, related_name='parent_comment')
# Text content of a comment
content = models.TextField(_('comment text'), max_length=commment_lenght, blank=False, null=False)
class Meta:
verbose_name = _('comment')
verbose_name_plural = _('comments')
def __unicode__(self):
return self.content
def get_content(self):
"Returns the text content for the comment"
return self.content
def get_user_id(self):
"Returns the id of the user who posted the comment"
        return self.user.pk
def get_date(self):
"Returns the timestamp associated to the comment"
return self.created
def get_parent_comment_id(self):
"Returns the id of the parent comment"
        return self.starting_comment.pk
    def set_parent_comment(self, parent_comment):
self.starting_comment = parent_comment
# Entity Cigarette
class Cigarette(models.Model):
"""
Cigarette smoked by a user
"""
# User - Foreign key
user = models.ForeignKey(user, blank=False, null=False, related_name='user_cigarettes')
# Date and time associated to the cigarette
cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)
cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)
class Meta:
verbose_name = _('cigarette')
verbose_name_plural = _('cigarettes')
def __unicode__(self):
return u'%s' % ( self.pk)
def get_cigarette_user_id(self):
"Returns the user id who smoked the cigarette"
        return self.user.pk
def get_date(self):
"Returns the date associated to the cigarette"
return self.cigarette_date
def get_time(self):
"Returns the time associated to the cigarette"
return self.cigarette_time
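A brief usage sketch of the reverse relation these models expose. This is a hypothetical helper, not code from the project; the only assumptions beyond the models above are the Django ORM and the accessor created by related_name='user_cigarettes':

from django.utils import timezone

def cigarettes_today(smoker):
    # Hypothetical helper: count today's cigarettes for a user through the
    # reverse accessor created by related_name='user_cigarettes'.
    return smoker.user_cigarettes.filter(cigarette_date=timezone.now().date()).count()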
|
flexible
|
{
"blob_id": "68ea462f56ba029a7c977d9c8b94e6f913336fb7",
"index": 4680,
"step-1": "<mask token>\n\n\nclass Cigarette(models.Model):\n <mask token>\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'user_cigarettes')\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n def get_cigarette_user_id(self):\n \"\"\"Returns the user id who smoked the cigarette\"\"\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"\"\"Returns the date associated to the cigarette\"\"\"\n return self.cigarette_date\n\n def get_time(self):\n \"\"\"Returns the time associated to the cigarette\"\"\"\n return self.cigarette_time\n",
"step-2": "<mask token>\n\n\nclass Comment(TimeStampedModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __unicode__(self):\n return self.content\n <mask token>\n\n def get_user_id(self):\n \"\"\"Returns the id of the user who posted the comment\"\"\"\n return self.comment_user.pk\n <mask token>\n\n def get_parent_comment_id(self):\n \"\"\"Returns the id of the parent comment\"\"\"\n return self.parent_comment.pk\n\n def set_parent_comment(parent_comment):\n self.starting_comment = parent_comment\n\n\nclass Cigarette(models.Model):\n \"\"\"\n Cigarette smoked by a user\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'user_cigarettes')\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n def get_cigarette_user_id(self):\n \"\"\"Returns the user id who smoked the cigarette\"\"\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"\"\"Returns the date associated to the cigarette\"\"\"\n return self.cigarette_date\n\n def get_time(self):\n \"\"\"Returns the time associated to the cigarette\"\"\"\n return self.cigarette_time\n",
"step-3": "<mask token>\n\n\nclass Comment(TimeStampedModel):\n \"\"\"\n Text comment posted by users\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'comment_user')\n starting_comment = models.ForeignKey('Comment', blank=True, null=True,\n related_name='parent_comment')\n content = models.TextField(_('comment text'), max_length=\n commment_lenght, blank=False, null=False)\n\n\n class Meta:\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __unicode__(self):\n return self.content\n\n def get_content(self):\n \"\"\"Returns the text content for the comment\"\"\"\n return self.content\n\n def get_user_id(self):\n \"\"\"Returns the id of the user who posted the comment\"\"\"\n return self.comment_user.pk\n\n def get_date(self):\n \"\"\"Returns the timestamp associated to the comment\"\"\"\n return self.created\n\n def get_parent_comment_id(self):\n \"\"\"Returns the id of the parent comment\"\"\"\n return self.parent_comment.pk\n\n def set_parent_comment(parent_comment):\n self.starting_comment = parent_comment\n\n\nclass Cigarette(models.Model):\n \"\"\"\n Cigarette smoked by a user\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'user_cigarettes')\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n def get_cigarette_user_id(self):\n \"\"\"Returns the user id who smoked the cigarette\"\"\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"\"\"Returns the date associated to the cigarette\"\"\"\n return self.cigarette_date\n\n def get_time(self):\n \"\"\"Returns the time associated to the cigarette\"\"\"\n return self.cigarette_time\n",
"step-4": "<mask token>\nuser = settings.AUTH_USER_MODEL\ncommment_lenght = settings.COMMENT_LENGTH\n\n\nclass Comment(TimeStampedModel):\n \"\"\"\n Text comment posted by users\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'comment_user')\n starting_comment = models.ForeignKey('Comment', blank=True, null=True,\n related_name='parent_comment')\n content = models.TextField(_('comment text'), max_length=\n commment_lenght, blank=False, null=False)\n\n\n class Meta:\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __unicode__(self):\n return self.content\n\n def get_content(self):\n \"\"\"Returns the text content for the comment\"\"\"\n return self.content\n\n def get_user_id(self):\n \"\"\"Returns the id of the user who posted the comment\"\"\"\n return self.comment_user.pk\n\n def get_date(self):\n \"\"\"Returns the timestamp associated to the comment\"\"\"\n return self.created\n\n def get_parent_comment_id(self):\n \"\"\"Returns the id of the parent comment\"\"\"\n return self.parent_comment.pk\n\n def set_parent_comment(parent_comment):\n self.starting_comment = parent_comment\n\n\nclass Cigarette(models.Model):\n \"\"\"\n Cigarette smoked by a user\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'user_cigarettes')\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n def get_cigarette_user_id(self):\n \"\"\"Returns the user id who smoked the cigarette\"\"\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"\"\"Returns the date associated to the cigarette\"\"\"\n return self.cigarette_date\n\n def get_time(self):\n \"\"\"Returns the time associated to the cigarette\"\"\"\n return self.cigarette_time\n",
"step-5": "from django.db import models\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom model_utils.models import TimeStampedModel\n\nuser = settings.AUTH_USER_MODEL\ncommment_lenght = settings.COMMENT_LENGTH\n\n\n# Entity Comment\nclass Comment(TimeStampedModel):\n \"\"\"\n Text comment posted by users\n \"\"\"\n\n # User - Foreign key\n user = models.ForeignKey(user, blank=False, null=False, related_name='comment_user')\n # Parent comment (optional) - i.e. a comment of a comment\n starting_comment = models.ForeignKey('Comment', blank=True, null=True, related_name='parent_comment')\n # Text content of a comment\n content = models.TextField(_('comment text'), max_length=commment_lenght, blank=False, null=False)\n\n class Meta:\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __unicode__(self):\n return self.content\n\n def get_content(self):\n \"Returns the text content for the comment\"\n return self.content\n\n def get_user_id(self):\n \"Returns the id of the user who posted the comment\"\n return self.comment_user.pk\n\n def get_date(self):\n \"Returns the timestamp associated to the comment\"\n return self.created\n\n def get_parent_comment_id(self):\n \"Returns the id of the parent comment\"\n return self.parent_comment.pk\n\n\n def set_parent_comment(parent_comment):\n self.starting_comment = parent_comment\n\n\n# Entity Cigarette\nclass Cigarette(models.Model):\n \"\"\"\n Cigarette smoked by a user\n \"\"\"\n\n # User - Foreign key\n user = models.ForeignKey(user, blank=False, null=False, related_name='user_cigarettes')\n # Date and time associated to the cigarette\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % ( self.pk)\n\n\n def get_cigarette_user_id(self):\n \"Returns the user id who smoked the cigarette\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"Returns the date associated to the cigarette\"\n return self.cigarette_date\n\n def get_time(self):\n \"Returns the time associated to the cigarette\"\n return self.cigarette_time\n\n\n",
"step-ids": [
6,
12,
16,
17,
19
]
}
|
[
6,
12,
16,
17,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('fieldsapp', '0003_pole_avatar')]
operations = [migrations.AddField(model_name='pole', name='email',
field=models.CharField(default=1, max_length=50, verbose_name=
'Email'), preserve_default=False), migrations.AddField(model_name=
'pole', name='number', field=models.CharField(default=1, max_length
=20, verbose_name='Номер'), preserve_default=False), migrations.
AlterField(model_name='pole', name='avatar', field=models.
ImageField(upload_to='', verbose_name='Фото')), migrations.
AlterField(model_name='pole', name='body', field=models.TextField(
verbose_name='Описание поля')), migrations.AlterField(model_name=
'pole', name='title', field=models.CharField(max_length=255,
verbose_name='Название поля'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('fieldsapp', '0003_pole_avatar')]
operations = [migrations.AddField(model_name='pole', name='email',
field=models.CharField(default=1, max_length=50, verbose_name=
'Email'), preserve_default=False), migrations.AddField(model_name=
'pole', name='number', field=models.CharField(default=1, max_length
=20, verbose_name='Номер'), preserve_default=False), migrations.
AlterField(model_name='pole', name='avatar', field=models.
ImageField(upload_to='', verbose_name='Фото')), migrations.
AlterField(model_name='pole', name='body', field=models.TextField(
verbose_name='Описание поля')), migrations.AlterField(model_name=
'pole', name='title', field=models.CharField(max_length=255,
verbose_name='Название поля'))]
<|reserved_special_token_1|>
# Generated by Django 2.2.8 on 2019-12-10 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fieldsapp', '0003_pole_avatar'),
]
operations = [
migrations.AddField(
model_name='pole',
name='email',
field=models.CharField(default=1, max_length=50, verbose_name='Email'),
preserve_default=False,
),
migrations.AddField(
model_name='pole',
name='number',
field=models.CharField(default=1, max_length=20, verbose_name='Номер'),
preserve_default=False,
),
migrations.AlterField(
model_name='pole',
name='avatar',
field=models.ImageField(upload_to='', verbose_name='Фото'),
),
migrations.AlterField(
model_name='pole',
name='body',
field=models.TextField(verbose_name='Описание поля'),
),
migrations.AlterField(
model_name='pole',
name='title',
field=models.CharField(max_length=255, verbose_name='Название поля'),
),
]
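For reference, a sketch of the Pole field state implied by the operations above once this migration is applied. This is a reconstruction for readability, not the project's actual models.py; Meta options, other fields, and upload paths are unknown:

from django.db import models

class Pole(models.Model):
    # Field state reconstructed from the AddField/AlterField operations above.
    title = models.CharField(max_length=255, verbose_name='Название поля')
    body = models.TextField(verbose_name='Описание поля')
    avatar = models.ImageField(upload_to='', verbose_name='Фото')
    email = models.CharField(max_length=50, verbose_name='Email')
    number = models.CharField(max_length=20, verbose_name='Номер')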
|
flexible
|
{
"blob_id": "9d6516ea099e035fb97e5165071103698a7ec140",
"index": 5812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('fieldsapp', '0003_pole_avatar')]\n operations = [migrations.AddField(model_name='pole', name='email',\n field=models.CharField(default=1, max_length=50, verbose_name=\n 'Email'), preserve_default=False), migrations.AddField(model_name=\n 'pole', name='number', field=models.CharField(default=1, max_length\n =20, verbose_name='Номер'), preserve_default=False), migrations.\n AlterField(model_name='pole', name='avatar', field=models.\n ImageField(upload_to='', verbose_name='Фото')), migrations.\n AlterField(model_name='pole', name='body', field=models.TextField(\n verbose_name='Описание поля')), migrations.AlterField(model_name=\n 'pole', name='title', field=models.CharField(max_length=255,\n verbose_name='Название поля'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('fieldsapp', '0003_pole_avatar')]\n operations = [migrations.AddField(model_name='pole', name='email',\n field=models.CharField(default=1, max_length=50, verbose_name=\n 'Email'), preserve_default=False), migrations.AddField(model_name=\n 'pole', name='number', field=models.CharField(default=1, max_length\n =20, verbose_name='Номер'), preserve_default=False), migrations.\n AlterField(model_name='pole', name='avatar', field=models.\n ImageField(upload_to='', verbose_name='Фото')), migrations.\n AlterField(model_name='pole', name='body', field=models.TextField(\n verbose_name='Описание поля')), migrations.AlterField(model_name=\n 'pole', name='title', field=models.CharField(max_length=255,\n verbose_name='Название поля'))]\n",
"step-5": "# Generated by Django 2.2.8 on 2019-12-10 10:28\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fieldsapp', '0003_pole_avatar'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='pole',\n name='email',\n field=models.CharField(default=1, max_length=50, verbose_name='Email'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='pole',\n name='number',\n field=models.CharField(default=1, max_length=20, verbose_name='Номер'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='pole',\n name='avatar',\n field=models.ImageField(upload_to='', verbose_name='Фото'),\n ),\n migrations.AlterField(\n model_name='pole',\n name='body',\n field=models.TextField(verbose_name='Описание поля'),\n ),\n migrations.AlterField(\n model_name='pole',\n name='title',\n field=models.CharField(max_length=255, verbose_name='Название поля'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
PROYECTO : Portal EDCA-HN
NOMBRE : ZipTools
Descripcion : Clase utilitaria para descomprimir archivos ZIP.
MM/DD/YYYY Colaboradores Descripcion
05/07/2019 Alla Duenas Creacion.
"""
import zipfile
from edca_mensajes import EdcaErrores as err, EdcaMensajes as msg
from edca_logs.EdcaLogger import EdcaLogger as log
class ZipTools:
    # Funcion para comprimir los archivos descargados
@staticmethod
def comprimir(archivo, dir_comprimir):
__archivo_zip = archivo[:archivo.find(".")] + ".zip"
try:
with zipfile.ZipFile(__archivo_zip,'w', zipfile.ZIP_DEFLATED) as archivoZip:
archivoZip.write(archivo)
archivoZip.close()
        except PermissionError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)
        except IOError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(
                                        err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)
    # Funcion para descomprimir los archivos descargados
@staticmethod
def descomprimir(archivo, dir_extraer):
try:
zip_ref = zipfile.ZipFile(archivo, 'r')
zip_list = zip_ref.infolist()
for contenido in zip_list:
log.registrar_log_info(__name__, err.EdcaErrores.INFO_ZIPTOOL_PRINT_DIR,
"EXTRAER ARCHIVO",
msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_PRINT_DIR) % contenido.filename)
zip_ref.extractall(dir_extraer)
zip_ref.close()
log.registrar_log_info(__name__, err.EdcaErrores.INFO_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_UNZIP))
        except PermissionError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)
        except IOError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(
                                        err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)
@staticmethod
def obtener_contenido_zip(archivo):
global zp
try:
zip_ref = zipfile.ZipFile(archivo, 'r')
zip_list = zip_ref.infolist()
for contenido in zip_list:
zp = contenido.filename
zip_ref.close()
return zp
        except PermissionError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP)
                                    % e.filename % e.strerror)
        except IOError as e:
            log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, "EXTRAER ARCHIVO",
                                    msg.EdcaMensajes.obt_mensaje(
                                        err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % e.filename % e.strerror)
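A short usage sketch of the static methods above. The file names are hypothetical, and the edca_mensajes/edca_logs packages imported by the class are assumed to be importable:

# Hypothetical calls illustrating the API above.
ZipTools.comprimir('reporte.json', '.')                  # writes reporte.zip next to the input file
ZipTools.descomprimir('reporte.zip', './extraido')       # extracts every entry into ./extraido
ultimo = ZipTools.obtener_contenido_zip('reporte.zip')   # name of the last entry in the archive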
|
normal
|
{
"blob_id": "1190e802fde6c2c6f48bd2720688bd9231b622e0",
"index": 6564,
"step-1": "<mask token>\n\n\nclass ZipTools:\n <mask token>\n\n @staticmethod\n def descomprimir(archivo, dir_extraer):\n try:\n zip_ref = zipfile.ZipFile(archivo, 'r')\n zip_list = zip_ref.infolist()\n for contenido in zip_list:\n log.registrar_log_info(__name__, err.EdcaErrores.\n INFO_ZIPTOOL_PRINT_DIR, 'EXTRAER ARCHIVO', msg.\n EdcaMensajes.obt_mensaje(err.EdcaErrores.\n INFO_ZIPTOOL_PRINT_DIR) % contenido.filename)\n zip_ref.extractall(dir_extraer)\n zip_ref.close()\n log.registrar_log_info(__name__, err.EdcaErrores.\n INFO_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_UNZIP))\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ZipTools:\n <mask token>\n\n @staticmethod\n def descomprimir(archivo, dir_extraer):\n try:\n zip_ref = zipfile.ZipFile(archivo, 'r')\n zip_list = zip_ref.infolist()\n for contenido in zip_list:\n log.registrar_log_info(__name__, err.EdcaErrores.\n INFO_ZIPTOOL_PRINT_DIR, 'EXTRAER ARCHIVO', msg.\n EdcaMensajes.obt_mensaje(err.EdcaErrores.\n INFO_ZIPTOOL_PRINT_DIR) % contenido.filename)\n zip_ref.extractall(dir_extraer)\n zip_ref.close()\n log.registrar_log_info(__name__, err.EdcaErrores.\n INFO_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_UNZIP))\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n\n @staticmethod\n def obtener_contenido_zip(archivo):\n global zp\n try:\n zip_ref = zipfile.ZipFile(archivo, 'r')\n zip_list = zip_ref.infolist()\n for contenido in zip_list:\n zp = contenido.filename\n zip_ref.close()\n return zp\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n",
"step-3": "<mask token>\n\n\nclass ZipTools:\n\n @staticmethod\n def comprimir(archivo, dir_comprimir):\n __archivo_zip = archivo[:archivo.find('.')] + '.zip'\n try:\n with zipfile.ZipFile(__archivo_zip, 'w', zipfile.ZIP_DEFLATED\n ) as archivoZip:\n archivoZip.write(archivo)\n archivoZip.close()\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n\n @staticmethod\n def descomprimir(archivo, dir_extraer):\n try:\n zip_ref = zipfile.ZipFile(archivo, 'r')\n zip_list = zip_ref.infolist()\n for contenido in zip_list:\n log.registrar_log_info(__name__, err.EdcaErrores.\n INFO_ZIPTOOL_PRINT_DIR, 'EXTRAER ARCHIVO', msg.\n EdcaMensajes.obt_mensaje(err.EdcaErrores.\n INFO_ZIPTOOL_PRINT_DIR) % contenido.filename)\n zip_ref.extractall(dir_extraer)\n zip_ref.close()\n log.registrar_log_info(__name__, err.EdcaErrores.\n INFO_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_UNZIP))\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n\n @staticmethod\n def obtener_contenido_zip(archivo):\n global zp\n try:\n zip_ref = zipfile.ZipFile(archivo, 'r')\n zip_list = zip_ref.infolist()\n for contenido in zip_list:\n zp = contenido.filename\n zip_ref.close()\n return zp\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n",
"step-4": "<mask token>\nimport zipfile\nfrom edca_mensajes import EdcaErrores as err, EdcaMensajes as msg\nfrom edca_logs.EdcaLogger import EdcaLogger as log\n\n\nclass ZipTools:\n\n @staticmethod\n def comprimir(archivo, dir_comprimir):\n __archivo_zip = archivo[:archivo.find('.')] + '.zip'\n try:\n with zipfile.ZipFile(__archivo_zip, 'w', zipfile.ZIP_DEFLATED\n ) as archivoZip:\n archivoZip.write(archivo)\n archivoZip.close()\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n\n @staticmethod\n def descomprimir(archivo, dir_extraer):\n try:\n zip_ref = zipfile.ZipFile(archivo, 'r')\n zip_list = zip_ref.infolist()\n for contenido in zip_list:\n log.registrar_log_info(__name__, err.EdcaErrores.\n INFO_ZIPTOOL_PRINT_DIR, 'EXTRAER ARCHIVO', msg.\n EdcaMensajes.obt_mensaje(err.EdcaErrores.\n INFO_ZIPTOOL_PRINT_DIR) % contenido.filename)\n zip_ref.extractall(dir_extraer)\n zip_ref.close()\n log.registrar_log_info(__name__, err.EdcaErrores.\n INFO_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_UNZIP))\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n\n @staticmethod\n def obtener_contenido_zip(archivo):\n global zp\n try:\n zip_ref = zipfile.ZipFile(archivo, 'r')\n zip_list = zip_ref.infolist()\n for contenido in zip_list:\n zp = contenido.filename\n zip_ref.close()\n return zp\n except PermissionError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) %\n PermissionError.filename % PermissionError.strerror)\n except IOError:\n log.registrar_log_error(__name__, err.EdcaErrores.\n ERR_ZIPTOOL_UNZIP, 'EXTRAER ARCHIVO', msg.EdcaMensajes.\n obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.\n filename % IOError.strerror)\n",
"step-5": "\"\"\"\r\nPROYECTO : Portal EDCA-HN\r\nNOMBRE : ZipTools\r\nDescripcion : Clase utilitaria para descomprimir archivos ZIP.\r\n\r\nMM/DD/YYYY Colaboradores Descripcion\r\n05/07/2019 Alla Duenas Creacion. \r\n\"\"\"\r\n\r\nimport zipfile\r\nfrom edca_mensajes import EdcaErrores as err, EdcaMensajes as msg\r\nfrom edca_logs.EdcaLogger import EdcaLogger as log\r\n\r\n\r\nclass ZipTools:\r\n\r\n # Funcion para cromprimir los archivos descargados\r\n @staticmethod\r\n def comprimir(archivo, dir_comprimir):\r\n __archivo_zip = archivo[:archivo.find(\".\")] + \".zip\"\r\n try:\r\n with zipfile.ZipFile(__archivo_zip,'w', zipfile.ZIP_DEFLATED) as archivoZip:\r\n archivoZip.write(archivo)\r\n archivoZip.close()\r\n\r\n except PermissionError:\r\n log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, \"EXTRAER ARCHIVO\",\r\n msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % PermissionError.filename % PermissionError.strerror)\r\n except IOError:\r\n log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, \"EXTRAER ARCHIVO\",\r\n msg.EdcaMensajes.obt_mensaje(\r\n err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.filename % IOError.strerror)\r\n \r\n # Funcion para descromprimir los archivos descargados\r\n @staticmethod\r\n def descomprimir(archivo, dir_extraer):\r\n try:\r\n zip_ref = zipfile.ZipFile(archivo, 'r')\r\n zip_list = zip_ref.infolist()\r\n for contenido in zip_list:\r\n log.registrar_log_info(__name__, err.EdcaErrores.INFO_ZIPTOOL_PRINT_DIR,\r\n \"EXTRAER ARCHIVO\",\r\n msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_PRINT_DIR) % contenido.filename)\r\n zip_ref.extractall(dir_extraer)\r\n zip_ref.close()\r\n log.registrar_log_info(__name__, err.EdcaErrores.INFO_ZIPTOOL_UNZIP, \"EXTRAER ARCHIVO\",\r\n msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.INFO_ZIPTOOL_UNZIP))\r\n except PermissionError:\r\n log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, \"EXTRAER ARCHIVO\",\r\n msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % PermissionError.filename % PermissionError.strerror)\r\n except IOError:\r\n log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, \"EXTRAER ARCHIVO\",\r\n msg.EdcaMensajes.obt_mensaje(\r\n err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.filename % IOError.strerror)\r\n\r\n @staticmethod\r\n def obtener_contenido_zip(archivo):\r\n global zp\r\n try:\r\n zip_ref = zipfile.ZipFile(archivo, 'r')\r\n zip_list = zip_ref.infolist()\r\n for contenido in zip_list:\r\n zp = contenido.filename\r\n zip_ref.close()\r\n return zp\r\n except PermissionError:\r\n log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, \"EXTRAER ARCHIVO\",\r\n msg.EdcaMensajes.obt_mensaje(err.EdcaErrores.ERR_ZIPTOOL_UNZIP)\r\n % PermissionError.filename % PermissionError.strerror)\r\n except IOError:\r\n log.registrar_log_error(__name__, err.EdcaErrores.ERR_ZIPTOOL_UNZIP, \"EXTRAER ARCHIVO\",\r\n msg.EdcaMensajes.obt_mensaje(\r\n err.EdcaErrores.ERR_ZIPTOOL_UNZIP) % IOError.filename % IOError.strerror)\r\n\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class MainWindow(QWidget):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def initUI(self):
self.setGeometry(300, 300, 500, 600)
self.setWindowTitle('Tektronix Channel Label Widget')
self.setWindowIcon(QIcon('Steam_icon_logo.gif'))
instrumentGroupBox = QGroupBox()
instrumentGrid = QGridLayout()
self.scopeComboBox = QComboBox()
for index in range(0, len(self.instrumentList)):
self.scopeComboBox.addItem(self.instrumentList[index].rstrip())
instrumentGrid.addWidget(self.scopeComboBox, 0, 0)
self.initScopeButton = QPushButton('Initialize Scope', self)
self.initScopeButton.clicked[bool].connect(self.initScope)
instrumentGrid.addWidget(self.initScopeButton, 1, 0)
scopeLabel = QLabel(self)
scopeLabel.setText('Scope Type')
instrumentGrid.addWidget(scopeLabel, 2, 0)
self.scopeIDN = QLabel(self)
self.scopeIDN.setText(self.instrumentName)
instrumentGrid.addWidget(self.scopeIDN, 3, 0)
instrumentGroupBox.setLayout(instrumentGrid)
instrumentGroupBox.setLayout(instrumentGrid)
startButtonGroupBox = QGroupBox()
startButtonLayout = QHBoxLayout()
self.startStopButton = QPushButton('Test Scope Connection', self)
self.startStopButton.clicked[bool].connect(self.startStopTest)
self.startStopButton.setEnabled(False)
startButtonLayout.addWidget(self.startStopButton)
self.getScopeShot = QPushButton('Get Scope Shot', self)
pictureGroupBox = QGroupBox()
pictureLayout = QHBoxLayout()
self.pictLabel = QLabel(self)
pictureLayout.addWidget(self.pictLabel)
pictureGroupBox.setLayout(pictureLayout)
self.getScopeShot.clicked[bool].connect(self.scopeShot)
self.getScopeShot.setEnabled(False)
startButtonLayout.addWidget(self.getScopeShot)
startButtonGroupBox.setLayout(startButtonLayout)
grid = QGridLayout()
grid.addWidget(instrumentGroupBox, 0, 0)
grid.addWidget(startButtonGroupBox, 1, 0)
grid.addWidget(pictureGroupBox, 2, 0)
self.setLayout(grid)
self.show()
<|reserved_special_token_0|>
def startStopTest(self):
self.scope.setState(1, 'ON')
self.scope.setState(2, 'ON')
self.scope.setState(3, 'ON')
self.scope.setState(4, 'ON')
self.scope.setBandwidth(1, 'ON')
self.scope.setBandwidth(2, 'ON')
self.scope.setBandwidth(3, 'ON')
self.scope.setBandwidth(4, 'ON')
self.scope.setEdgeTrigger(3, 50, 'FALL')
def scopeShot(self):
print('Get Scope Shot')
self.scope.clear()
print('ReadIDN Returns: ' + str(self.scope.readIDN()))
print('next line')
self.scope.clear()
self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')
self.pixmap = QPixmap('siglentImage.png')
self.pictLabel.setText('Image Here')
self.pictLabel.setPixmap(self.pixmap)
self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MainWindow(QWidget):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self):
super(MainWindow, self).__init__()
self.configInstrument = Instrument()
self.instrumentList = self.configInstrument.listInstruments()
self.instrumentTypes = self.configInstrument.listInstrumentTypes()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 500, 600)
self.setWindowTitle('Tektronix Channel Label Widget')
self.setWindowIcon(QIcon('Steam_icon_logo.gif'))
instrumentGroupBox = QGroupBox()
instrumentGrid = QGridLayout()
self.scopeComboBox = QComboBox()
for index in range(0, len(self.instrumentList)):
self.scopeComboBox.addItem(self.instrumentList[index].rstrip())
instrumentGrid.addWidget(self.scopeComboBox, 0, 0)
self.initScopeButton = QPushButton('Initialize Scope', self)
self.initScopeButton.clicked[bool].connect(self.initScope)
instrumentGrid.addWidget(self.initScopeButton, 1, 0)
scopeLabel = QLabel(self)
scopeLabel.setText('Scope Type')
instrumentGrid.addWidget(scopeLabel, 2, 0)
self.scopeIDN = QLabel(self)
self.scopeIDN.setText(self.instrumentName)
instrumentGrid.addWidget(self.scopeIDN, 3, 0)
        instrumentGroupBox.setLayout(instrumentGrid)
startButtonGroupBox = QGroupBox()
startButtonLayout = QHBoxLayout()
self.startStopButton = QPushButton('Test Scope Connection', self)
self.startStopButton.clicked[bool].connect(self.startStopTest)
self.startStopButton.setEnabled(False)
startButtonLayout.addWidget(self.startStopButton)
self.getScopeShot = QPushButton('Get Scope Shot', self)
pictureGroupBox = QGroupBox()
pictureLayout = QHBoxLayout()
self.pictLabel = QLabel(self)
pictureLayout.addWidget(self.pictLabel)
pictureGroupBox.setLayout(pictureLayout)
self.getScopeShot.clicked[bool].connect(self.scopeShot)
self.getScopeShot.setEnabled(False)
startButtonLayout.addWidget(self.getScopeShot)
startButtonGroupBox.setLayout(startButtonLayout)
grid = QGridLayout()
grid.addWidget(instrumentGroupBox, 0, 0)
grid.addWidget(startButtonGroupBox, 1, 0)
grid.addWidget(pictureGroupBox, 2, 0)
self.setLayout(grid)
self.show()
def initScope(self):
self.instrumentName = self.scopeComboBox.currentText()
self.scope, self.scopeName = self.configInstrument.initInstrument(
'172.18.18.24')
print('Configured Scope: ' + self.scopeName)
self.scopeIDN.setText(self.scopeName)
self.startStopButton.setEnabled(True)
self.getScopeShot.setEnabled(True)
def startStopTest(self):
self.scope.setState(1, 'ON')
self.scope.setState(2, 'ON')
self.scope.setState(3, 'ON')
self.scope.setState(4, 'ON')
self.scope.setBandwidth(1, 'ON')
self.scope.setBandwidth(2, 'ON')
self.scope.setBandwidth(3, 'ON')
self.scope.setBandwidth(4, 'ON')
self.scope.setEdgeTrigger(3, 50, 'FALL')
def scopeShot(self):
print('Get Scope Shot')
self.scope.clear()
print('ReadIDN Returns: ' + str(self.scope.readIDN()))
print('next line')
self.scope.clear()
self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')
self.pixmap = QPixmap('siglentImage.png')
self.pictLabel.setText('Image Here')
self.pictLabel.setPixmap(self.pixmap)
self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../Instrument_Libraries')
<|reserved_special_token_0|>
myappid = u'mycompany.myproduct.subproduct.version'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
class MainWindow(QWidget):
    instrumentName = 'Uninitialized Instrument'
instrumentList = []
instrumentTypes = {}
instrumentKey = 'Uninitialized Key'
def __init__(self):
super(MainWindow, self).__init__()
self.configInstrument = Instrument()
self.instrumentList = self.configInstrument.listInstruments()
self.instrumentTypes = self.configInstrument.listInstrumentTypes()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 500, 600)
self.setWindowTitle('Tektronix Channel Label Widget')
self.setWindowIcon(QIcon('Steam_icon_logo.gif'))
instrumentGroupBox = QGroupBox()
instrumentGrid = QGridLayout()
self.scopeComboBox = QComboBox()
for index in range(0, len(self.instrumentList)):
self.scopeComboBox.addItem(self.instrumentList[index].rstrip())
instrumentGrid.addWidget(self.scopeComboBox, 0, 0)
self.initScopeButton = QPushButton('Initialize Scope', self)
self.initScopeButton.clicked[bool].connect(self.initScope)
instrumentGrid.addWidget(self.initScopeButton, 1, 0)
scopeLabel = QLabel(self)
scopeLabel.setText('Scope Type')
instrumentGrid.addWidget(scopeLabel, 2, 0)
self.scopeIDN = QLabel(self)
self.scopeIDN.setText(self.instrumentName)
instrumentGrid.addWidget(self.scopeIDN, 3, 0)
        instrumentGroupBox.setLayout(instrumentGrid)
startButtonGroupBox = QGroupBox()
startButtonLayout = QHBoxLayout()
self.startStopButton = QPushButton('Test Scope Connection', self)
self.startStopButton.clicked[bool].connect(self.startStopTest)
self.startStopButton.setEnabled(False)
startButtonLayout.addWidget(self.startStopButton)
self.getScopeShot = QPushButton('Get Scope Shot', self)
pictureGroupBox = QGroupBox()
pictureLayout = QHBoxLayout()
self.pictLabel = QLabel(self)
pictureLayout.addWidget(self.pictLabel)
pictureGroupBox.setLayout(pictureLayout)
self.getScopeShot.clicked[bool].connect(self.scopeShot)
self.getScopeShot.setEnabled(False)
startButtonLayout.addWidget(self.getScopeShot)
startButtonGroupBox.setLayout(startButtonLayout)
grid = QGridLayout()
grid.addWidget(instrumentGroupBox, 0, 0)
grid.addWidget(startButtonGroupBox, 1, 0)
grid.addWidget(pictureGroupBox, 2, 0)
self.setLayout(grid)
self.show()
def initScope(self):
self.instrumentName = self.scopeComboBox.currentText()
self.scope, self.scopeName = self.configInstrument.initInstrument(
'172.18.18.24')
print('Configured Scope: ' + self.scopeName)
self.scopeIDN.setText(self.scopeName)
self.startStopButton.setEnabled(True)
self.getScopeShot.setEnabled(True)
def startStopTest(self):
self.scope.setState(1, 'ON')
self.scope.setState(2, 'ON')
self.scope.setState(3, 'ON')
self.scope.setState(4, 'ON')
self.scope.setBandwidth(1, 'ON')
self.scope.setBandwidth(2, 'ON')
self.scope.setBandwidth(3, 'ON')
self.scope.setBandwidth(4, 'ON')
self.scope.setEdgeTrigger(3, 50, 'FALL')
def scopeShot(self):
print('Get Scope Shot')
self.scope.clear()
print('ReadIDN Returns: ' + str(self.scope.readIDN()))
print('next line')
self.scope.clear()
self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')
self.pixmap = QPixmap('siglentImage.png')
self.pictLabel.setText('Image Here')
self.pictLabel.setPixmap(self.pixmap)
self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())
if __name__ == '__main__':
app = QCoreApplication.instance()
if app is None:
app = QApplication(sys.argv)
ex = MainWindow()
app.exec_()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar, QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout
from PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot
sys.path.append('../Instrument_Libraries')
from instrumentConfig import Instrument
import ctypes
myappid = u'mycompany.myproduct.subproduct.version'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
class MainWindow(QWidget):
    instrumentName = 'Uninitialized Instrument'
instrumentList = []
instrumentTypes = {}
instrumentKey = 'Uninitialized Key'
def __init__(self):
super(MainWindow, self).__init__()
self.configInstrument = Instrument()
self.instrumentList = self.configInstrument.listInstruments()
self.instrumentTypes = self.configInstrument.listInstrumentTypes()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 500, 600)
self.setWindowTitle('Tektronix Channel Label Widget')
self.setWindowIcon(QIcon('Steam_icon_logo.gif'))
instrumentGroupBox = QGroupBox()
instrumentGrid = QGridLayout()
self.scopeComboBox = QComboBox()
for index in range(0, len(self.instrumentList)):
self.scopeComboBox.addItem(self.instrumentList[index].rstrip())
instrumentGrid.addWidget(self.scopeComboBox, 0, 0)
self.initScopeButton = QPushButton('Initialize Scope', self)
self.initScopeButton.clicked[bool].connect(self.initScope)
instrumentGrid.addWidget(self.initScopeButton, 1, 0)
scopeLabel = QLabel(self)
scopeLabel.setText('Scope Type')
instrumentGrid.addWidget(scopeLabel, 2, 0)
self.scopeIDN = QLabel(self)
self.scopeIDN.setText(self.instrumentName)
instrumentGrid.addWidget(self.scopeIDN, 3, 0)
        instrumentGroupBox.setLayout(instrumentGrid)
startButtonGroupBox = QGroupBox()
startButtonLayout = QHBoxLayout()
self.startStopButton = QPushButton('Test Scope Connection', self)
self.startStopButton.clicked[bool].connect(self.startStopTest)
self.startStopButton.setEnabled(False)
startButtonLayout.addWidget(self.startStopButton)
self.getScopeShot = QPushButton('Get Scope Shot', self)
pictureGroupBox = QGroupBox()
pictureLayout = QHBoxLayout()
self.pictLabel = QLabel(self)
pictureLayout.addWidget(self.pictLabel)
pictureGroupBox.setLayout(pictureLayout)
self.getScopeShot.clicked[bool].connect(self.scopeShot)
self.getScopeShot.setEnabled(False)
startButtonLayout.addWidget(self.getScopeShot)
startButtonGroupBox.setLayout(startButtonLayout)
grid = QGridLayout()
grid.addWidget(instrumentGroupBox, 0, 0)
grid.addWidget(startButtonGroupBox, 1, 0)
grid.addWidget(pictureGroupBox, 2, 0)
self.setLayout(grid)
self.show()
def initScope(self):
self.instrumentName = self.scopeComboBox.currentText()
self.scope, self.scopeName = self.configInstrument.initInstrument(
'172.18.18.24')
print('Configured Scope: ' + self.scopeName)
self.scopeIDN.setText(self.scopeName)
self.startStopButton.setEnabled(True)
self.getScopeShot.setEnabled(True)
def startStopTest(self):
self.scope.setState(1, 'ON')
self.scope.setState(2, 'ON')
self.scope.setState(3, 'ON')
self.scope.setState(4, 'ON')
self.scope.setBandwidth(1, 'ON')
self.scope.setBandwidth(2, 'ON')
self.scope.setBandwidth(3, 'ON')
self.scope.setBandwidth(4, 'ON')
self.scope.setEdgeTrigger(3, 50, 'FALL')
def scopeShot(self):
print('Get Scope Shot')
self.scope.clear()
print('ReadIDN Returns: ' + str(self.scope.readIDN()))
print('next line')
self.scope.clear()
self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')
self.pixmap = QPixmap('siglentImage.png')
self.pictLabel.setText('Image Here')
self.pictLabel.setPixmap(self.pixmap)
self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())
if __name__ == '__main__':
app = QCoreApplication.instance()
if app is None:
app = QApplication(sys.argv)
ex = MainWindow()
app.exec_()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on 11/03/2020
@author: stevenp@valvesoftware.com
"""
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar,
QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout)
from PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot
#append the relative location you want to import from
sys.path.append("../Instrument_Libraries")
from instrumentConfig import Instrument
#For some reason the following code needs to be here for the Steam icon to show on the taskbar.
#Google code, don't know why.
import ctypes
myappid = u'mycompany.myproduct.subproduct.version' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
class MainWindow(QWidget):
instrumentName = "Unitialized Instrument"
instrumentList = []
#Instrument Types is a dictionary
instrumentTypes = {}
instrumentKey = "Uninitialized Key"
def __init__(self):
super(MainWindow, self).__init__()
self.configInstrument = Instrument()
self.instrumentList = self.configInstrument.listInstruments()
self.instrumentTypes = self.configInstrument.listInstrumentTypes()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 500, 600)
self.setWindowTitle('Tektronix Channel Label Widget')
self.setWindowIcon(QIcon('Steam_icon_logo.gif'))
instrumentGroupBox = QGroupBox()
instrumentGrid = QGridLayout()
self.scopeComboBox = QComboBox()
for index in range (0, len(self.instrumentList)):
self.scopeComboBox.addItem(self.instrumentList[index].rstrip())
instrumentGrid.addWidget(self.scopeComboBox, 0, 0)
self.initScopeButton = QPushButton('Initialize Scope', self)
self.initScopeButton.clicked[bool].connect(self.initScope)
instrumentGrid.addWidget(self.initScopeButton, 1, 0)
scopeLabel = QLabel(self)
scopeLabel.setText("Scope Type")
instrumentGrid.addWidget(scopeLabel, 2, 0)
self.scopeIDN = QLabel(self)
self.scopeIDN.setText(self.instrumentName)
instrumentGrid.addWidget(self.scopeIDN, 3, 0)
        instrumentGroupBox.setLayout(instrumentGrid)
startButtonGroupBox = QGroupBox()
startButtonLayout = QHBoxLayout()
self.startStopButton = QPushButton('Test Scope Connection', self)
self.startStopButton.clicked[bool].connect(self.startStopTest)
self.startStopButton.setEnabled(False)
startButtonLayout.addWidget(self.startStopButton)
self.getScopeShot = QPushButton('Get Scope Shot', self)
pictureGroupBox = QGroupBox()
pictureLayout = QHBoxLayout()
self.pictLabel = QLabel(self)
pictureLayout.addWidget(self.pictLabel)
pictureGroupBox.setLayout(pictureLayout)
self.getScopeShot.clicked[bool].connect(self.scopeShot)
self.getScopeShot.setEnabled(False)
startButtonLayout.addWidget(self.getScopeShot)
startButtonGroupBox.setLayout(startButtonLayout)
grid = QGridLayout()
grid.addWidget(instrumentGroupBox, 0, 0)
grid.addWidget(startButtonGroupBox, 1, 0)
grid.addWidget(pictureGroupBox, 2, 0)
self.setLayout(grid)
self.show()
def initScope(self):
self.instrumentName = self.scopeComboBox.currentText()
# self.scope, self.scopeName = self.configInstrument.initInstrument(self.instrumentName)
self.scope, self.scopeName = self.configInstrument.initInstrument("172.18.18.24")
print ("Configured Scope: " + self.scopeName)
self.scopeIDN.setText(self.scopeName)
self.startStopButton.setEnabled(True)
self.getScopeShot.setEnabled(True)
def startStopTest(self):
self.scope.setState(1, "ON")
self.scope.setState(2, "ON")
self.scope.setState(3, "ON")
self.scope.setState(4, "ON")
self.scope.setBandwidth(1, "ON")
self.scope.setBandwidth(2, "ON")
self.scope.setBandwidth(3, "ON")
self.scope.setBandwidth(4, "ON")
#Siglent library hard codes trigger level to mV
self.scope.setEdgeTrigger(3, 50, "FALL")
def scopeShot(self):
print ("Get Scope Shot")
self.scope.clear()
print ("ReadIDN Returns: " + str(self.scope.readIDN()))
print ("next line")
self.scope.clear()
self.scope.scopeScreenCaptureCopyToPC("siglentImage.png")
# loading image
self.pixmap = QPixmap("siglentImage.png")
# adding image to label
self.pictLabel.setText("Image Here")
self.pictLabel.setPixmap(self.pixmap)
# Optional, resize label to image size
self.pictLabel.resize(self.pixmap.width(),
self.pixmap.height())
if __name__ == '__main__':
app = QCoreApplication.instance()
if app is None:
app = QApplication(sys.argv)
ex = MainWindow()
app.exec_()
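The widget above depends on instrumentConfig.Instrument from ../Instrument_Libraries, which is not included here. For readers who want to run the GUI without that library or without hardware, the following is a minimal stand-in sketched purely from how MainWindow calls it; every class, method, and return value below is an assumption, not the real driver API.

# Hypothetical stand-in for instrumentConfig.Instrument, inferred only from the
# calls MainWindow makes above. Save it as ../Instrument_Libraries/instrumentConfig.py
# (or adjust sys.path) to exercise the GUI without real hardware.
class _FakeScope:
    def setState(self, channel, state):
        print(f"channel {channel} -> {state}")

    def setBandwidth(self, channel, state):
        print(f"bandwidth limit on channel {channel} -> {state}")

    def setEdgeTrigger(self, channel, level_mv, slope):
        print(f"edge trigger: channel {channel}, {level_mv} mV, {slope}")

    def clear(self):
        pass

    def readIDN(self):
        return "FAKE,SCOPE,0000,1.0"

    def scopeScreenCaptureCopyToPC(self, filename):
        # The real driver copies a screenshot from the scope; this stub only
        # creates an empty file so QPixmap has something to open.
        open(filename, "wb").close()


class Instrument:
    def listInstruments(self):
        # One entry per selectable instrument, as shown in the combo box.
        return ["Fake Siglent Scope\n"]

    def listInstrumentTypes(self):
        return {"Fake Siglent Scope": "oscilloscope"}

    def initInstrument(self, address):
        # Returns (driver object, human-readable name), matching initScope().
        return _FakeScope(), "Fake scope at " + str(address)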
|
flexible
|
{
"blob_id": "33464f19c42d1a192792a73297f4d926df78ab71",
"index": 2906,
"step-1": "<mask token>\n\n\nclass MainWindow(QWidget):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n <mask token>\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainWindow(QWidget):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../Instrument_Libraries')\n<mask token>\nmyappid = u'mycompany.myproduct.subproduct.version'\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n\nclass MainWindow(QWidget):\n instrumentName = 'Unitialized Instrument'\n instrumentList = []\n instrumentTypes = {}\n instrumentKey = 'Uninitialized Key'\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), 
self.pixmap.height())\n\n\nif __name__ == '__main__':\n app = QCoreApplication.instance()\n if app is None:\n app = QApplication(sys.argv)\n ex = MainWindow()\n app.exec_()\n",
"step-4": "<mask token>\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar, QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout\nfrom PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot\nsys.path.append('../Instrument_Libraries')\nfrom instrumentConfig import Instrument\nimport ctypes\nmyappid = u'mycompany.myproduct.subproduct.version'\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n\nclass MainWindow(QWidget):\n instrumentName = 'Unitialized Instrument'\n instrumentList = []\n instrumentTypes = {}\n instrumentKey = 'Uninitialized Key'\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.configInstrument = Instrument()\n self.instrumentList = self.configInstrument.listInstruments()\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 500, 600)\n self.setWindowTitle('Tektronix Channel Label Widget')\n self.setWindowIcon(QIcon('Steam_icon_logo.gif'))\n instrumentGroupBox = QGroupBox()\n instrumentGrid = QGridLayout()\n self.scopeComboBox = QComboBox()\n for index in range(0, len(self.instrumentList)):\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip())\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\n self.initScopeButton = QPushButton('Initialize Scope', self)\n self.initScopeButton.clicked[bool].connect(self.initScope)\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\n scopeLabel = QLabel(self)\n scopeLabel.setText('Scope Type')\n instrumentGrid.addWidget(scopeLabel, 2, 0)\n self.scopeIDN = QLabel(self)\n self.scopeIDN.setText(self.instrumentName)\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\n instrumentGroupBox.setLayout(instrumentGrid)\n instrumentGroupBox.setLayout(instrumentGrid)\n startButtonGroupBox = QGroupBox()\n startButtonLayout = QHBoxLayout()\n self.startStopButton = QPushButton('Test Scope Connection', self)\n self.startStopButton.clicked[bool].connect(self.startStopTest)\n self.startStopButton.setEnabled(False)\n startButtonLayout.addWidget(self.startStopButton)\n self.getScopeShot = QPushButton('Get Scope Shot', self)\n pictureGroupBox = QGroupBox()\n pictureLayout = QHBoxLayout()\n self.pictLabel = QLabel(self)\n pictureLayout.addWidget(self.pictLabel)\n pictureGroupBox.setLayout(pictureLayout)\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\n self.getScopeShot.setEnabled(False)\n startButtonLayout.addWidget(self.getScopeShot)\n startButtonGroupBox.setLayout(startButtonLayout)\n grid = QGridLayout()\n grid.addWidget(instrumentGroupBox, 0, 0)\n grid.addWidget(startButtonGroupBox, 1, 0)\n grid.addWidget(pictureGroupBox, 2, 0)\n self.setLayout(grid)\n self.show()\n\n def initScope(self):\n self.instrumentName = self.scopeComboBox.currentText()\n self.scope, self.scopeName = self.configInstrument.initInstrument(\n '172.18.18.24')\n print('Configured Scope: ' + self.scopeName)\n self.scopeIDN.setText(self.scopeName)\n self.startStopButton.setEnabled(True)\n self.getScopeShot.setEnabled(True)\n\n def startStopTest(self):\n self.scope.setState(1, 'ON')\n self.scope.setState(2, 'ON')\n self.scope.setState(3, 'ON')\n self.scope.setState(4, 'ON')\n self.scope.setBandwidth(1, 'ON')\n self.scope.setBandwidth(2, 'ON')\n 
self.scope.setBandwidth(3, 'ON')\n self.scope.setBandwidth(4, 'ON')\n self.scope.setEdgeTrigger(3, 50, 'FALL')\n\n def scopeShot(self):\n print('Get Scope Shot')\n self.scope.clear()\n print('ReadIDN Returns: ' + str(self.scope.readIDN()))\n print('next line')\n self.scope.clear()\n self.scope.scopeScreenCaptureCopyToPC('siglentImage.png')\n self.pixmap = QPixmap('siglentImage.png')\n self.pictLabel.setText('Image Here')\n self.pictLabel.setPixmap(self.pixmap)\n self.pictLabel.resize(self.pixmap.width(), self.pixmap.height())\n\n\nif __name__ == '__main__':\n app = QCoreApplication.instance()\n if app is None:\n app = QApplication(sys.argv)\n ex = MainWindow()\n app.exec_()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on 11/03/2020\r\n\r\n@author: stevenp@valvesoftware.com\r\n\"\"\"\r\nimport sys\r\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QRadioButton, QVBoxLayout, QCheckBox, QProgressBar,\r\n QGroupBox, QComboBox, QLineEdit, QPushButton, QMessageBox, QInputDialog, QDialog, QDialogButtonBox, QSlider, QGridLayout, QHBoxLayout)\r\nfrom PyQt5.QtGui import QIcon, QPainter, QPen, QFont, QPixmap\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot\r\n\r\n#append the relative location you want to import from\r\nsys.path.append(\"../Instrument_Libraries\")\r\nfrom instrumentConfig import Instrument\r\n \r\n#For some reason the following code needs to be here for the Steam icon to show on the taskbar.\r\n#Google code, don't know why.\r\nimport ctypes\r\nmyappid = u'mycompany.myproduct.subproduct.version' # arbitrary string\r\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) \r\n\r\nclass MainWindow(QWidget):\r\n\r\n instrumentName = \"Unitialized Instrument\"\r\n \r\n \r\n instrumentList = []\r\n #Instrument Types is a dictionary\r\n instrumentTypes = {}\r\n instrumentKey = \"Uninitialized Key\"\r\n \r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n \r\n self.configInstrument = Instrument()\r\n self.instrumentList = self.configInstrument.listInstruments()\r\n self.instrumentTypes = self.configInstrument.listInstrumentTypes()\r\n\r\n self.initUI()\r\n\r\n\r\n def initUI(self): \r\n \r\n self.setGeometry(300, 300, 500, 600)\r\n self.setWindowTitle('Tektronix Channel Label Widget')\r\n self.setWindowIcon(QIcon('Steam_icon_logo.gif')) \r\n \r\n instrumentGroupBox = QGroupBox()\r\n instrumentGrid = QGridLayout()\r\n \r\n self.scopeComboBox = QComboBox()\r\n for index in range (0, len(self.instrumentList)):\r\n self.scopeComboBox.addItem(self.instrumentList[index].rstrip()) \r\n instrumentGrid.addWidget(self.scopeComboBox, 0, 0)\r\n \r\n self.initScopeButton = QPushButton('Initialize Scope', self)\r\n self.initScopeButton.clicked[bool].connect(self.initScope)\r\n \r\n instrumentGrid.addWidget(self.initScopeButton, 1, 0)\r\n\r\n scopeLabel = QLabel(self)\r\n scopeLabel.setText(\"Scope Type\")\r\n instrumentGrid.addWidget(scopeLabel, 2, 0)\r\n\r\n self.scopeIDN = QLabel(self)\r\n self.scopeIDN.setText(self.instrumentName)\r\n instrumentGrid.addWidget(self.scopeIDN, 3, 0)\r\n \r\n instrumentGroupBox.setLayout(instrumentGrid)\r\n \r\n instrumentGroupBox.setLayout(instrumentGrid)\r\n\r\n startButtonGroupBox = QGroupBox()\r\n startButtonLayout = QHBoxLayout()\r\n self.startStopButton = QPushButton('Test Scope Connection', self)\r\n \r\n self.startStopButton.clicked[bool].connect(self.startStopTest)\r\n self.startStopButton.setEnabled(False)\r\n startButtonLayout.addWidget(self.startStopButton)\r\n\r\n\r\n self.getScopeShot = QPushButton('Get Scope Shot', self)\r\n \r\n\r\n pictureGroupBox = QGroupBox()\r\n pictureLayout = QHBoxLayout()\r\n self.pictLabel = QLabel(self)\r\n pictureLayout.addWidget(self.pictLabel)\r\n pictureGroupBox.setLayout(pictureLayout)\r\n\r\n self.getScopeShot.clicked[bool].connect(self.scopeShot)\r\n self.getScopeShot.setEnabled(False)\r\n startButtonLayout.addWidget(self.getScopeShot)\r\n\r\n startButtonGroupBox.setLayout(startButtonLayout)\r\n\r\n grid = QGridLayout()\r\n grid.addWidget(instrumentGroupBox, 0, 0)\r\n grid.addWidget(startButtonGroupBox, 1, 0)\r\n grid.addWidget(pictureGroupBox, 2, 
0)\r\n\r\n self.setLayout(grid)\r\n\r\n self.show()\r\n\r\n def initScope(self):\r\n \r\n self.instrumentName = self.scopeComboBox.currentText()\r\n \r\n # self.scope, self.scopeName = self.configInstrument.initInstrument(self.instrumentName)\r\n self.scope, self.scopeName = self.configInstrument.initInstrument(\"172.18.18.24\")\r\n \r\n print (\"Configured Scope: \" + self.scopeName)\r\n \r\n self.scopeIDN.setText(self.scopeName)\r\n\r\n self.startStopButton.setEnabled(True)\r\n self.getScopeShot.setEnabled(True)\r\n\r\n def startStopTest(self):\r\n \r\n self.scope.setState(1, \"ON\")\r\n self.scope.setState(2, \"ON\")\r\n self.scope.setState(3, \"ON\")\r\n self.scope.setState(4, \"ON\")\r\n \r\n self.scope.setBandwidth(1, \"ON\")\r\n self.scope.setBandwidth(2, \"ON\")\r\n self.scope.setBandwidth(3, \"ON\")\r\n self.scope.setBandwidth(4, \"ON\")\r\n \r\n #Siglent library hard codes trigger level to mV\r\n self.scope.setEdgeTrigger(3, 50, \"FALL\")\r\n \r\n def scopeShot(self):\r\n print (\"Get Scope Shot\")\r\n self.scope.clear()\r\n print (\"ReadIDN Returns: \" + str(self.scope.readIDN()))\r\n print (\"next line\")\r\n self.scope.clear()\r\n \r\n self.scope.scopeScreenCaptureCopyToPC(\"siglentImage.png\")\r\n \r\n # loading image \r\n self.pixmap = QPixmap(\"siglentImage.png\") \r\n \r\n # adding image to label \r\n self.pictLabel.setText(\"Image Here\") \r\n self.pictLabel.setPixmap(self.pixmap) \r\n \r\n # Optional, resize label to image size \r\n self.pictLabel.resize(self.pixmap.width(), \r\n self.pixmap.height()) \r\n \r\n \r\nif __name__ == '__main__':\r\n \r\n app = QCoreApplication.instance()\r\n if app is None:\r\n app = QApplication(sys.argv)\r\n ex = MainWindow()\r\n app.exec_() \r\n",
"step-ids": [
4,
6,
9,
10,
11
]
}
|
[
4,
6,
9,
10,
11
] |
# Copyright (C) 2020 Claudio Marques - All Rights Reserved
dataset_path = "data/output/dataset{toReplace}.csv"
dataset_path_final = "data/output/final/datasetFinal.csv"
log_path = "data/logs/output_append.log"
numberOfThreads = 45
inputFileMalign = "data/input/malign/all.log"
outputFileMalign = "data/output/fileMalign.csv"
sampleMalign = 300
inputFileBenignAAAA = "data/input/benign/aaaa/all.log"
outputFileBenignAAA = "data/output/fileBenignAAAA.csv"
sampleAAAA = 100
inputFileBenignCNAME = "data/input/benign/cname/all.log"
outputFileBenignCNAME = "data/output/fileBenignCNAME.csv"
sampleCNAME = 100
inputFileBenignMX = "data/input/benign/mx/all.log"
outputFileBenignMX = "data/output/fileBenignMX.csv"
sampleMX = 100
alexaDbPath = "utils/Database/AlexaDB/top-1m.csv"
ports = [80, 443, 21, 22, 23, 25, 53, 110, 143, 161, 445, 465, 587, 993, 995, 3306, 3389, 7547, 8080, 8888]
fileHeader = "Domain,DNSRecordType,MXDnsResponse,TXTDnsResponse,HasSPFInfo,HasDkimInfo,HasDmarcInfo,Ip,DomainInAlexaDB,CommonPorts,CountryCode,RegisteredCountry,CreationDate," \
"LastUpdateDate,ASN,HttpResponseCode,RegisteredOrg,SubdomainNumber,Entropy,EntropyOfSubDomains,StrangeCharacters," \
"TLD,IpReputation,DomainReputation," \
"ConsoantRatio,NumericRatio,SpecialCharRatio,VowelRatio,ConsoantSequence,VowelSequence,NumericSequence,SpecialCharSequence,DomainLength,Class"
headerRegex = "%s,%s,%d,%d,%d,%d,%d,%s,%d,%d,%s,%s,%d," \
"%d,%d,%d,%s,%d,%d,%d,%d," \
"%s,%d,%d," \
"%0.1f,%0.1f,%0.1f,%0.1f,%d,%d,%d,%d,%d,%d\n"
sublist3rEngines = "bing,passivedns"
|
normal
|
{
"blob_id": "305133d4840741bd5c318a99a96660d8988dd61a",
"index": 7772,
"step-1": "<mask token>\n",
"step-2": "dataset_path = 'data/output/dataset{toReplace}.csv'\ndataset_path_final = 'data/output/final/datasetFinal.csv'\nlog_path = 'data/logs/output_append.log'\nnumberOfThreads = 45\ninputFileMalign = 'data/input/malign/all.log'\noutputFileMalign = 'data/output/fileMalign.csv'\nsampleMalign = 300\ninputFileBenignAAAA = 'data/input/benign/aaaa/all.log'\noutputFileBenignAAA = 'data/output/fileBenignAAAA.csv'\nsampleAAAA = 100\ninputFileBenignCNAME = 'data/input/benign/cname/all.log'\noutputFileBenignCNAME = 'data/output/fileBenignCNAME.csv'\nsampleCNAME = 100\ninputFileBenignMX = 'data/input/benign/mx/all.log'\noutputFileBenignMX = 'data/output/fileBenignMX.csv'\nsampleMX = 100\nalexaDbPath = 'utils/Database/AlexaDB/top-1m.csv'\nports = [80, 443, 21, 22, 23, 25, 53, 110, 143, 161, 445, 465, 587, 993, \n 995, 3306, 3389, 7547, 8080, 8888]\nfileHeader = (\n 'Domain,DNSRecordType,MXDnsResponse,TXTDnsResponse,HasSPFInfo,HasDkimInfo,HasDmarcInfo,Ip,DomainInAlexaDB,CommonPorts,CountryCode,RegisteredCountry,CreationDate,LastUpdateDate,ASN,HttpResponseCode,RegisteredOrg,SubdomainNumber,Entropy,EntropyOfSubDomains,StrangeCharacters,TLD,IpReputation,DomainReputation,ConsoantRatio,NumericRatio,SpecialCharRatio,VowelRatio,ConsoantSequence,VowelSequence,NumericSequence,SpecialCharSequence,DomainLength,Class'\n )\nheaderRegex = \"\"\"%s,%s,%d,%d,%d,%d,%d,%s,%d,%d,%s,%s,%d,%d,%d,%d,%s,%d,%d,%d,%d,%s,%d,%d,%0.1f,%0.1f,%0.1f,%0.1f,%d,%d,%d,%d,%d,%d\n\"\"\"\nsublist3rEngines = 'bing,passivedns'\n",
"step-3": "# Copyright (C) 2020 Claudio Marques - All Rights Reserved\r\ndataset_path = \"data/output/dataset{toReplace}.csv\"\r\ndataset_path_final = \"data/output/final/datasetFinal.csv\"\r\nlog_path = \"data/logs/output_append.log\"\r\nnumberOfThreads = 45\r\n\r\ninputFileMalign = \"data/input/malign/all.log\"\r\noutputFileMalign = \"data/output/fileMalign.csv\"\r\nsampleMalign = 300\r\n\r\ninputFileBenignAAAA = \"data/input/benign/aaaa/all.log\"\r\noutputFileBenignAAA = \"data/output/fileBenignAAAA.csv\"\r\nsampleAAAA = 100\r\n\r\ninputFileBenignCNAME = \"data/input/benign/cname/all.log\"\r\noutputFileBenignCNAME = \"data/output/fileBenignCNAME.csv\"\r\nsampleCNAME = 100\r\n\r\ninputFileBenignMX = \"data/input/benign/mx/all.log\"\r\noutputFileBenignMX = \"data/output/fileBenignMX.csv\"\r\nsampleMX = 100\r\n\r\nalexaDbPath = \"utils/Database/AlexaDB/top-1m.csv\"\r\n\r\nports = [80, 443, 21, 22, 23, 25, 53, 110, 143, 161, 445, 465, 587, 993, 995, 3306, 3389, 7547, 8080, 8888]\r\n\r\nfileHeader = \"Domain,DNSRecordType,MXDnsResponse,TXTDnsResponse,HasSPFInfo,HasDkimInfo,HasDmarcInfo,Ip,DomainInAlexaDB,CommonPorts,CountryCode,RegisteredCountry,CreationDate,\" \\\r\n \"LastUpdateDate,ASN,HttpResponseCode,RegisteredOrg,SubdomainNumber,Entropy,EntropyOfSubDomains,StrangeCharacters,\" \\\r\n \"TLD,IpReputation,DomainReputation,\" \\\r\n \"ConsoantRatio,NumericRatio,SpecialCharRatio,VowelRatio,ConsoantSequence,VowelSequence,NumericSequence,SpecialCharSequence,DomainLength,Class\"\r\n\r\nheaderRegex = \"%s,%s,%d,%d,%d,%d,%d,%s,%d,%d,%s,%s,%d,\" \\\r\n \"%d,%d,%d,%s,%d,%d,%d,%d,\" \\\r\n \"%s,%d,%d,\" \\\r\n \"%0.1f,%0.1f,%0.1f,%0.1f,%d,%d,%d,%d,%d,%d\\n\"\r\n\r\nsublist3rEngines = \"bing,passivedns\"\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def generator(factor, modulus=-1, maxx=2147483647):
def next(prev):
nxt = (prev*factor) % maxx
if modulus > 0:
while nxt % modulus != 0:
nxt = (nxt * factor) % maxx
return nxt
return next
def main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):
genA = generator(a_fact, a_mod)
genB = generator(b_fact, b_mod)
match = 0
mask = (0xFF << 8) + 0xFF
for i in range(N):
a = genA(a)
b = genB(b)
match += [0, 1][(mask & a) == (mask & b)]
return match
if __name__ == '__main__':
#example
#print(main(65, 8921))
#print(main(65,8921,4,8,2000))
#print(main(65,8921,4,8,5000000))
#PART 1
#print(main(634,301))
#PART 2
print(main(634,301,4,8,5000000))
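As a quick sanity check of generator(), the sketch below (not part of the original solution) reproduces the first two values that generator A yields from the puzzle's published example seed of 65 with factor 16807.

# Illustrative check only: generator A seeded with 65 should produce
# 1092455 and then 1181022009, per the puzzle's worked example.
next_a = generator(16807)
first = next_a(65)
assert first == 1092455
assert next_a(first) == 1181022009
print("generator A matches the published example values")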
|
normal
|
{
"blob_id": "6162911befc8ad37591f7c19b14b349c655ccac0",
"index": 3856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):\n genA = generator(a_fact, a_mod)\n genB = generator(b_fact, b_mod)\n match = 0\n mask = (255 << 8) + 255\n for i in range(N):\n a = genA(a)\n b = genB(b)\n match += [0, 1][mask & a == mask & b]\n return match\n\n\n<mask token>\n",
"step-3": "def generator(factor, modulus=-1, maxx=2147483647):\n\n def next(prev):\n nxt = prev * factor % maxx\n if modulus > 0:\n while nxt % modulus != 0:\n nxt = nxt * factor % maxx\n return nxt\n return next\n\n\ndef main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):\n genA = generator(a_fact, a_mod)\n genB = generator(b_fact, b_mod)\n match = 0\n mask = (255 << 8) + 255\n for i in range(N):\n a = genA(a)\n b = genB(b)\n match += [0, 1][mask & a == mask & b]\n return match\n\n\n<mask token>\n",
"step-4": "def generator(factor, modulus=-1, maxx=2147483647):\n\n def next(prev):\n nxt = prev * factor % maxx\n if modulus > 0:\n while nxt % modulus != 0:\n nxt = nxt * factor % maxx\n return nxt\n return next\n\n\ndef main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):\n genA = generator(a_fact, a_mod)\n genB = generator(b_fact, b_mod)\n match = 0\n mask = (255 << 8) + 255\n for i in range(N):\n a = genA(a)\n b = genB(b)\n match += [0, 1][mask & a == mask & b]\n return match\n\n\nif __name__ == '__main__':\n print(main(634, 301, 4, 8, 5000000))\n",
"step-5": "def generator(factor, modulus=-1, maxx=2147483647):\n def next(prev):\n nxt = (prev*factor) % maxx\n if modulus > 0:\n while nxt % modulus != 0:\n nxt = (nxt * factor) % maxx\n return nxt\n return next\n\n\ndef main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):\n genA = generator(a_fact, a_mod)\n genB = generator(b_fact, b_mod)\n match = 0\n mask = (0xFF << 8) + 0xFF\n for i in range(N):\n a = genA(a)\n b = genB(b)\n match += [0, 1][(mask & a) == (mask & b)]\n return match\n\nif __name__ == '__main__':\n #example\n #print(main(65, 8921))\n #print(main(65,8921,4,8,2000))\n #print(main(65,8921,4,8,5000000))\n \n #PART 1\n #print(main(634,301))\n\n #PART 2\n print(main(634,301,4,8,5000000))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import csv
import us
from flask import abort, Flask, request, render_template
app = Flask(__name__) # pylint: disable=invalid-name
@app.route('/')
def root():
return render_template('index.html')
@app.route('/api')
def index():
return render_template('index.html')
@app.route('/api/total/counties')
def total_counties():
return process_counties_total(read_macro('county'), get_args())
@app.route('/api/total/counties/<state>')
def total_counties_state(state):
return process_state_counties_total(read_macro('county'), state, None, get_args())
@app.route('/api/total/counties/<state>/<county>')
def total_counties_state_county(state, county):
return process_state_counties_total(read_macro('county'), state, county, get_args())
@app.route('/api/total/states')
def total_states():
return country_view_total(read_macro('country'), get_args())
@app.route('/api/total/states/<state>')
def total_states_state(state):
return state_view_total(read_macro('country'), state, get_args())
@app.route('/api/total/states/<state>/counties')
def total_states_state_counties(state):
return process_state_counties_total(read_macro('county'), state, None, get_args())
@app.route('/api/total/states/<state>/counties/<county>')
def total_states_state_counties_county(state, county):
return process_state_counties_total(read_macro('county'), state, county, get_args())
@app.route('/api/timeline/counties')
def timeline_counties():
return process_country_county(read_macro('county'), get_args())
@app.route('/api/timeline/counties/<state>')
def timeline_counties_state(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/counties/<state>/<county>')
def timeline_counties_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args())
@app.route('/api/timeline/states')
def timeline_states():
return country_view(read_macro('country'), get_args())
@app.route('/api/timeline/states/<state>')
def timeline_state(state):
return state_view(read_macro('country'), state, get_args())
@app.route('/api/timeline/states/<state>/counties')
def timeline_state_counties(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/states/<state>/counties/<county>')
def timeline_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args())
def state_view_total(data, state_filter, args):
data = filter_country_state(data, state_filter)
result = process_mode(args, data[-1][3], data[-1][4])
result = str(result) if isinstance(result, int) else result
return result
def state_view(data, state_filter, args):
result = {}
data = filter_country_state(data, state_filter)
for row in data:
result[row[0]] = process_mode(args, row[3], row[4])
return result
def country_view_total(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in reversed(data):
if row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[3], row[4])
return dataset
def country_view(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in data:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])
return dataset
def process_state_counties_total(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
result = process_county_data_total(data, county_filter, args)
if isinstance(result, int):
result = str(result)
return result
return process_state_data_total(data, args)
def process_state_data_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
if row[key_row] and row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[4], row[5])
return dataset
def process_state_county(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
return process_county_data(data, county_filter, args)
return process_state_data(data, args)
def process_county_data_total(data, county_filter, args):
for row in reversed(data):
if compare_county(county_filter, row[1], row[3]):
return process_mode(args, row[4], row[5])
return None
def process_county_data(data, county_filter, args):
dataset = {}
for row in data:
if compare_county(county_filter, row[1], row[3]):
dataset[row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_state_data(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
if row[key_row]:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_counties_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = process_mode(args, row[4], row[5])
return dataset
def process_country_county(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = {}
dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_mode(args, cases, deaths):
if args['mode'] == 'cases':
return int(cases)
if args['mode'] == 'deaths':
return int(deaths)
return {'cases': cases, 'deaths': deaths}
def filter_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[2]):
result.append(row)
return result
def filter_country_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[1]):
result.append(row)
return result
def read_macro(macro):
cv_data = []
with open(get_macro_file(macro), newline='') as data_file:
data_reader = csv.reader(data_file)
for row in data_reader:
cv_data.append(row)
cv_data.pop(0)
return cv_data
def get_macro_file(macro):
file = None
if macro == 'county':
file = 'county.csv'
elif macro == 'state':
file = 'county.csv'
elif macro == 'country':
file = 'state.csv'
if not file:
abort(500)
return file
def get_args():
return {'mode': request.args.get('mode', None),
'fips': request.args.get('fipsKey', False)}
def compare_state(state_filter, entry):
if str_normalize(entry) == str_normalize(state_filter):
return True
if us.states.lookup(state_filter) and us.states.lookup(state_filter).name == entry:
return True
return False
def compare_county(county_filter, entry, fips_entry):
if str_normalize(entry) == str_normalize(county_filter):
return True
if county_filter == fips_entry:
return True
return False
def str_normalize(words):
return words.replace(' ', '').lower().capitalize()
def get_key_row(args, locale):
if locale == 'state':
key_row = 3 if args['fips'] else 1
else:
key_row = 2 if args['fips'] else 1
return key_row
def get_state_key(args, state):
if args['fips']:
return us.states.lookup(state).fips
return state
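To see how these routes respond end to end, here is a minimal sketch, not part of the original service, that drives the API with Flask's built-in test client. It assumes the module above is saved as app.py, that county.csv and state.csv are present in the working directory, and that 'Washington' and 'King' actually appear in the data; adjust the names to match your CSVs.

# Illustrative only: exercise two endpoints with Flask's test client.
# Assumes this module is importable as app.py and the CSV files exist.
from app import app

with app.test_client() as client:
    # Cumulative case counts per state, keyed by FIPS because fipsKey is set.
    resp = client.get('/api/total/states?mode=cases&fipsKey=1')
    print(resp.status_code, resp.get_json())

    # Full case/death timeline for one county (names are assumed sample data).
    resp = client.get('/api/timeline/states/Washington/counties/King')
    print(resp.status_code, resp.get_json())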
|
normal
|
{
"blob_id": "af00c6f443426b1f61e1816d7d14ebc7e6871a82",
"index": 5562,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n<mask token>\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n<mask token>\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\n<mask token>\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in 
reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\n<mask token>\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, 
county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n ).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter 
== fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = 
process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n ).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n 
return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-4": "import csv\nimport us\nfrom flask import abort, Flask, request, render_template\napp = Flask(__name__)\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = 
filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n ).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, 
fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-5": "import csv\nimport us\n\nfrom flask import abort, Flask, request, render_template\n\napp = Flask(__name__) # pylint: disable=invalid-name\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county, get_args())\n\n\n@app.route('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county, get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args())\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args())\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, 
args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None),\n 'fips': request.args.get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, 
fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-ids": [
34,
39,
40,
41,
42
]
}
|
[
34,
39,
40,
41,
42
] |
# Create two integer variables and print their sum. What is the type of the
# result?
# Now, create a float variable and print its sum with an integer variable. What
# is the type of the result?
# Divide your smallest integer value by your largest integer value. Is the
# result what you expected? Now, do the same with your float variable and an
# integer variable. What do you get?
# Fill in the blanks: try adding the following two string variables and print
# the result. What do you get?
greeting = "My name is "
your_name = ""
# Try adding the following variables.
best_string = "I am "
your_age = 6
# Although Python can add integers and floats, it can't add strings and integers.
# In order to do this, we need to convert the integer variable to a string using
# the str keyword
# Uncomment the line below and check that it works.
# print(best_string + str(your_age))
# You can create complex string by using multiple string additions.
# Uncomment the line below and see the result.
# print(best_string + str(your_age) + " years old")
# We can also use the float keyword and the int keyword to convert variables to
# floats and ints respectively.
my_int = 5
print(float(my_int))
# Now, convert pi to an int.
pi = 3.1415
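# One possible answer (a sketch; the exercise presumably wants the built-in int(),
# which truncates the fractional part rather than rounding):
# print(int(pi))  # prints 3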
|
normal
|
{
"blob_id": "fcbbffe0682da9f2131fdddbef606dcae3303ce9",
"index": 1979,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(float(my_int))\n<mask token>\n",
"step-3": "greeting = 'My name is '\nyour_name = ''\nbest_string = 'I am '\nyour_age = 6\nmy_int = 5\nprint(float(my_int))\npi = 3.1415\n",
"step-4": "# Create two integer variables and print their sum. What is the type of the\n# result?\n\n# Now, create a float variable and print its sum with an integer variable. What\n# is the type of the result.\n\n# Divide your smallest integer value by your largest integer value. Is the\n# result what you expected? Now, do the same with your float variable and an\n# integer variable. What to you get?\n\n# Fill in the blanks, try adding the following two string variables and print\n# the result. What do you get?\ngreeting = \"My name is \"\nyour_name = \"\"\n\n# Try adding the following variables.\nbest_string = \"I am \"\nyour_age = 6\n\n\n# Although Python can add integers and floats, it can't add strings and integers.\n# In order to do this, we need to convert the integer variable to a string using\n# the str keyword\n\n# Uncomment the line below and check that it works.\n# print(best_string + str(your_age))\n\n# You can create complex string by using multiple string additions.\n# Uncomment the line below and see the result.\n# print(best_string + str(your_age) + \" years old\")\n\n# We can also use the float keyword and the int keyword to convert variables to\n# floats and ints respectively.\n\nmy_int = 5\nprint(float(my_int))\n\n# Now, convert pi to an int.\n\npi = 3.1415\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from time import sleep
import RPi.GPIO as gpio
#GPIO.setmode(GPIO.BCM)
gpio.setwarnings(False)
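# NOTE (assumption): BCM pins 26/19 presumably form the direction pair for one
# motor and 13/6 the pair for the other; the exact H-bridge wiring is not
# documented in this script.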
def init():
gpio.setmode(gpio.BCM)
gpio.setup(26, gpio.OUT)
gpio.setup(19, gpio.OUT)
gpio.setup(13, gpio.OUT)
gpio.setup(6, gpio.OUT)
def turn_left(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def turn_right(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def forward(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def reverse(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def stop(tf):
gpio.output(26, False)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, False)
sleep(tf)
gpio.cleanup()
def drive(direction, tym):
init()
if direction == "forward":
forward(tym)
stop(tym)
elif direction == "reverse":
reverse(tym)
stop(tym)
elif direction == "left":
turn_left(tym)
stop(tym)
elif direction == "right":
turn_right(tym)
stop(tym)
elif direction == "stop":
stop(tym)
else :
stop(tym)
if __name__ == '__main__':
import sys
drive((sys.argv[1]), float(sys.argv[2]))
gpio.cleanup()
##
##init()
##forward(0.6)
##sleep(1)
##reverse(0.6)
##sleep(1)
##turn_right(0.6)
##sleep(1)
##turn_left(0.6)
##stop(1)
|
normal
|
{
"blob_id": "a7cbd595b86908fb399bf11e1522588e0b0475c3",
"index": 9226,
"step-1": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\n<mask token>\n",
"step-4": "<mask token>\ngpio.setwarnings(False)\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\nif __name__ == '__main__':\n import sys\n drive(sys.argv[1], float(sys.argv[2]))\n gpio.cleanup()\n",
"step-5": "from time import sleep\nimport RPi.GPIO as gpio\n#GPIO.setmode(GPIO.BCM)\ngpio.setwarnings(False)\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n \ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n \ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n \ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n \ndef drive(direction, tym):\n init()\n \n if direction == \"forward\":\n forward(tym)\n stop(tym)\n \n elif direction == \"reverse\":\n reverse(tym)\n stop(tym)\n\n elif direction == \"left\":\n turn_left(tym)\n stop(tym)\n\n elif direction == \"right\":\n turn_right(tym)\n stop(tym)\n\n elif direction == \"stop\":\n stop(tym)\n\n else :\n stop(tym)\n\n\n\nif __name__ == '__main__':\n\timport sys\n\tdrive((sys.argv[1]), float(sys.argv[2]))\n\tgpio.cleanup()\n\n##\n##init()\n##forward(0.6)\n##sleep(1)\n##reverse(0.6)\n##sleep(1)\n##turn_right(0.6)\n##sleep(1)\n##turn_left(0.6)\n##stop(1)\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
class Methodos(object):
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(self.driver, 15)
<|reserved_special_token_0|>
def Click(self, id):
e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))
e.click()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Methodos(object):
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(self.driver, 15)
def SendText(self, _id, text):
        e = self.wait.until(EC.element_to_be_clickable((By.ID, _id)))
e.clear()
e.send_keys(text)
self.driver.implicitly_wait(5)
def Click(self, id):
e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))
e.click()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Methodos(object):
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(self.driver, 15)
def SendText(self, _id, text):
        e = self.wait.until(EC.element_to_be_clickable((By.ID, _id)))
e.clear()
e.send_keys(text)
self.driver.implicitly_wait(5)
def Click(self, id):
e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))
e.click()
def GetElementId(self, idtext):
        return self.wait.until(EC.element_to_be_clickable((By.ID, idtext)))
<|reserved_special_token_1|>
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Methodos(object):
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(self.driver, 15)
def SendText(self, _id, text):
        e = self.wait.until(EC.element_to_be_clickable((By.ID, _id)))
e.clear()
e.send_keys(text)
self.driver.implicitly_wait(5)
def Click(self, id):
e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))
e.click()
def GetElementId(self, idtext):
        return self.wait.until(EC.element_to_be_clickable((By.ID, idtext)))
<|reserved_special_token_1|>
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# driver = webdriver.Chrome('C:/automation/chromedriver')
# wait = WebDriverWait(driver, 15)
class Methodos(object):
def __init__(self,driver):
self.driver=driver
self.wait=WebDriverWait(self.driver, 15)
def SendText(self, _id, text):
        e = self.wait.until(EC.element_to_be_clickable((By.ID, _id)))
e.clear()
e.send_keys(text)
self.driver.implicitly_wait(5)
def Click(self, id):
e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))
e.click()
def GetElementId(self,idtext):
        return self.wait.until(EC.element_to_be_clickable((By.ID, idtext)))
# def SendText(driver,wait,_id,text):
# e= wait.until(EC.element_to_be_clickable(By.ID,_id))
# e.clear()
# e.send_keys(text)
# driver.implicitly_wait(5)
# def Click(driver,wait,id):
# e=wait.until(EC.element_to_be_clickable((By.ID,id)))
# e.click()
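# Example usage (illustrative only; the element ids below are placeholders, not
# part of this module):
# driver = webdriver.Chrome('C:/automation/chromedriver')
# page = Methodos(driver)
# page.SendText('username', 'qa-user')
# page.Click('login-button')
# driver.quit()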
|
flexible
|
{
"blob_id": "0a23b16329d8b599a4ee533604d316bdfe4b579a",
"index": 4832,
"step-1": "<mask token>\n\n\nclass Methodos(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 15)\n <mask token>\n\n def Click(self, id):\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\n e.click()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Methodos(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 15)\n\n def SendText(self, _id, text):\n e = self.wait.until(EC.element_to_be_clickable(By.ID, _id))\n e.clear()\n e.send_keys(text)\n self.driver.implicitly_wait(5)\n\n def Click(self, id):\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\n e.click()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Methodos(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 15)\n\n def SendText(self, _id, text):\n e = self.wait.until(EC.element_to_be_clickable(By.ID, _id))\n e.clear()\n e.send_keys(text)\n self.driver.implicitly_wait(5)\n\n def Click(self, id):\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\n e.click()\n\n def GetElementId(self, idtext):\n return self.wait.until(EC.element_to_be_clickable(By.ID, idtext))\n",
"step-4": "from selenium.webdriver.common.keys import Keys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass Methodos(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.wait = WebDriverWait(self.driver, 15)\n\n def SendText(self, _id, text):\n e = self.wait.until(EC.element_to_be_clickable(By.ID, _id))\n e.clear()\n e.send_keys(text)\n self.driver.implicitly_wait(5)\n\n def Click(self, id):\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\n e.click()\n\n def GetElementId(self, idtext):\n return self.wait.until(EC.element_to_be_clickable(By.ID, idtext))\n",
"step-5": "from selenium.webdriver.common.keys import Keys\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\n# driver = webdriver.Chrome('C:/automation/chromedriver')\r\n# wait = WebDriverWait(driver, 15)\r\nclass Methodos(object):\r\n def __init__(self,driver):\r\n self.driver=driver\r\n self.wait=WebDriverWait(self.driver, 15)\r\n\r\n def SendText(self, _id, text):\r\n e = self.wait.until(EC.element_to_be_clickable(By.ID, _id))\r\n e.clear()\r\n e.send_keys(text)\r\n self.driver.implicitly_wait(5)\r\n\r\n def Click(self, id):\r\n e = self.wait.until(EC.element_to_be_clickable((By.ID, id)))\r\n e.click()\r\n\r\n\r\n def GetElementId(self,idtext):\r\n return self.wait.until(EC.element_to_be_clickable(By.ID,idtext))\r\n\r\n# def SendText(driver,wait,_id,text):\r\n# e= wait.until(EC.element_to_be_clickable(By.ID,_id))\r\n# e.clear()\r\n# e.send_keys(text)\r\n# driver.implicitly_wait(5)\r\n\r\n\r\n\r\n# def Click(driver,wait,id):\r\n# e=wait.until(EC.element_to_be_clickable((By.ID,id)))\r\n# e.click()\r\n\r\n\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
import sys
import setuptools
from distutils.core import setup
with open("README.md", "r") as fh:
long_description = fh.read()
def get_info():
init_file = 'PIKACHU/__init__.py'
with open(init_file, 'r') as f:
for line in f.readlines():
if "=" in line:
exec(compile(line, "", 'exec'))
return locals()['name'], locals()['author'], locals()['version']
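# For reference, get_info() assumes PIKACHU/__init__.py contains plain assignment
# lines that the exec() above can evaluate, e.g. (illustrative values only):
# name = "PIKACHU"
# author = "..."
# version = "0.1.0"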
NAME, AUTHOR, VERSION = get_info()
sys.dont_write_bytecode = True
setuptools.setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email="fufu.bluesand@gmail.com",
description="a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/smilefufu/PIKACHU",
data_files = [("", ["LICENSE"])],
packages=setuptools.find_packages(),
install_requires=[
"pika",
],
classifiers=(
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent'
),
)
|
normal
|
{
"blob_id": "f14ff29a1a76c2916cb211c476a56aaa5061bf71",
"index": 8837,
"step-1": "<mask token>\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\n<mask token>\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n 'fufu.bluesand@gmail.com', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-3": "<mask token>\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\nNAME, AUTHOR, VERSION = get_info()\nsys.dont_write_bytecode = True\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n 'fufu.bluesand@gmail.com', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-4": "import sys\nimport setuptools\nfrom distutils.core import setup\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if '=' in line:\n exec(compile(line, '', 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\n\nNAME, AUTHOR, VERSION = get_info()\nsys.dont_write_bytecode = True\nsetuptools.setup(name=NAME, version=VERSION, author=AUTHOR, author_email=\n 'fufu.bluesand@gmail.com', description=\n 'a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/smilefufu/PIKACHU', data_files\n =[('', ['LICENSE'])], packages=setuptools.find_packages(),\n install_requires=['pika'], classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport sys\nimport setuptools\nfrom distutils.core import setup\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\ndef get_info():\n init_file = 'PIKACHU/__init__.py'\n with open(init_file, 'r') as f:\n for line in f.readlines():\n if \"=\" in line:\n exec(compile(line, \"\", 'exec'))\n return locals()['name'], locals()['author'], locals()['version']\n\nNAME, AUTHOR, VERSION = get_info()\n\nsys.dont_write_bytecode = True\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n author=AUTHOR,\n author_email=\"fufu.bluesand@gmail.com\",\n description=\"a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/smilefufu/PIKACHU\",\n data_files = [(\"\", [\"LICENSE\"])],\n packages=setuptools.find_packages(),\n install_requires=[\n \"pika\",\n ],\n classifiers=(\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent'\n ),\n)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from home import views
from order import views as OV
urlpatterns = [
path('user', include('user.urls')),
path('order', include('order.urls')),
path('shopcart/', OV.shopcart, name='shopcart'),
path('product',include('product.urls')),
    path('', include('home.urls')),  # '' - this is home
path('faq/', views.faq, name='faq'),
path('admin/', admin.site.urls),
path('ckeditor', include('ckeditor_uploader.urls')),
path('about/', views.about, name='about'),
path('contact/', views.contact, name='about'),
path('search/', views.search,name='search'),
path('search_auto', views.search_auto, name='search_auto'),
path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'),
path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'),
path('lic/',views.lic,name='lic'),
path('post/',views.post,name='post'),
path('post/<int:id>/',views.post_detail, name='post_detail'),
path('lic/<int:id>/',views.lic_detail, name='lic_detail'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
normal
|
{
"blob_id": "97cc29e0d54e5d5e05dff16c92ecc4046363185f",
"index": 344,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-3": "<mask token>\nurlpatterns = [path('user', include('user.urls')), path('order', include(\n 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(\n 'product', include('product.urls')), path('', include('home.urls')),\n path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')), path('about/',\n views.about, name='about'), path('contact/', views.contact, name=\n 'about'), path('search/', views.search, name='search'), path(\n 'search_auto', views.search_auto, name='search_auto'), path(\n 'category/<int:id>/<slug:slug>/', views.category_products, name=\n 'category_products'), path('product/<int:id>/<slug:slug>/', views.\n product_detail, name='product_detail'), path('lic/', views.lic, name=\n 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',\n views.post_detail, name='post_detail'), path('lic/<int:id>/', views.\n lic_detail, name='lic_detail')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-4": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom home import views\nfrom order import views as OV\nurlpatterns = [path('user', include('user.urls')), path('order', include(\n 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(\n 'product', include('product.urls')), path('', include('home.urls')),\n path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')), path('about/',\n views.about, name='about'), path('contact/', views.contact, name=\n 'about'), path('search/', views.search, name='search'), path(\n 'search_auto', views.search_auto, name='search_auto'), path(\n 'category/<int:id>/<slug:slug>/', views.category_products, name=\n 'category_products'), path('product/<int:id>/<slug:slug>/', views.\n product_detail, name='product_detail'), path('lic/', views.lic, name=\n 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',\n views.post_detail, name='post_detail'), path('lic/<int:id>/', views.\n lic_detail, name='lic_detail')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-5": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom home import views\nfrom order import views as OV\n\nurlpatterns = [\n path('user', include('user.urls')),\n path('order', include('order.urls')),\n path('shopcart/', OV.shopcart, name='shopcart'),\n path('product',include('product.urls')),\n path('',include('home.urls')),# '' - bu home\n path('faq/', views.faq, name='faq'),\n path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')),\n path('about/', views.about, name='about'),\n path('contact/', views.contact, name='about'),\n path('search/', views.search,name='search'),\n path('search_auto', views.search_auto, name='search_auto'),\n path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'),\n path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'),\n path('lic/',views.lic,name='lic'),\n path('post/',views.post,name='post'),\n path('post/<int:id>/',views.post_detail, name='post_detail'),\n path('lic/<int:id>/',views.lic_detail, name='lic_detail'),\n\n\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Figure:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Figure:
<|reserved_special_token_0|>
def __new__(cls, *args):
if cls is Figure:
return None
return object.__new__(cls)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Figure:
<|reserved_special_token_0|>
def __new__(cls, *args):
if cls is Figure:
return None
return object.__new__(cls)
def add_area(self, other):
if isinstance(other, Figure):
return self.area + other.area
else:
raise ValueError('Should pass Figure as parameter')
<|reserved_special_token_1|>
class Figure:
area = 0
def __new__(cls, *args):
if cls is Figure:
return None
return object.__new__(cls)
def add_area(self, other):
if isinstance(other, Figure):
return self.area + other.area
else:
raise ValueError('Should pass Figure as parameter')
<|reserved_special_token_1|>
class Figure:
area = 0
def __new__(cls, *args):
if cls is Figure:
return None
return object.__new__(cls)
def add_area(self, other):
if isinstance(other, Figure):
return self.area + other.area
else:
raise ValueError("Should pass Figure as parameter")
|
flexible
|
{
"blob_id": "ceab21e41adf171e99e6c3c8541c418d82db6168",
"index": 3272,
"step-1": "class Figure:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Figure:\n <mask token>\n\n def __new__(cls, *args):\n if cls is Figure:\n return None\n return object.__new__(cls)\n <mask token>\n",
"step-3": "class Figure:\n <mask token>\n\n def __new__(cls, *args):\n if cls is Figure:\n return None\n return object.__new__(cls)\n\n def add_area(self, other):\n if isinstance(other, Figure):\n return self.area + other.area\n else:\n raise ValueError('Should pass Figure as parameter')\n",
"step-4": "class Figure:\n area = 0\n\n def __new__(cls, *args):\n if cls is Figure:\n return None\n return object.__new__(cls)\n\n def add_area(self, other):\n if isinstance(other, Figure):\n return self.area + other.area\n else:\n raise ValueError('Should pass Figure as parameter')\n",
"step-5": "class Figure:\n area = 0\n\n def __new__(cls, *args):\n if cls is Figure:\n return None\n return object.__new__(cls)\n\n def add_area(self, other):\n if isinstance(other, Figure):\n return self.area + other.area\n else:\n raise ValueError(\"Should pass Figure as parameter\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def index(request):
data = {}
return render(request, 'polls/index.html', data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
data = {}
return render(request, 'polls/index.html', data)
<|reserved_special_token_0|>
def searchShow(request):
if 'search' in request.GET:
search_string = request.GET['search']
context = {'search_string': search_string}
return render(request, 'polls/show.html', context)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
data = {}
return render(request, 'polls/index.html', data)
def show(request):
return render(request, 'polls/show.html')
def searchShow(request):
if 'search' in request.GET:
search_string = request.GET['search']
context = {'search_string': search_string}
return render(request, 'polls/show.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from polls.models import Poll
from .serializers import PollSerializer
def index(request):
data = {}
return render(request, 'polls/index.html', data)
def show(request):
return render(request, 'polls/show.html')
def searchShow(request):
if 'search' in request.GET:
search_string = request.GET['search']
context = {'search_string': search_string}
return render(request, 'polls/show.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from polls.models import Poll
from .serializers import PollSerializer
# class PollView(APIView):
#
# def get(self, request):
# serializer = PollSerializer(Poll.objects.all(), many=True)
# response = {"polls": serializer.data}
# return Response(response, status=status.HTTP_200_OK)
#
# def post(self, request, format=None):
# data = request.data
# serializer = PollSerializer(data=data)
# if serializer.is_valid():
# poll = Poll(**data)
# poll.save()
# response = serializer.data
# return Response(response, status=status.HTTP_200_OK)
#
#
def index(request):
data = {}
return render(request,"polls/index.html",data)
#
# def show(request):
# data = {}
# p = Poll.objects.all()
# data["polls"] = p
# return render(request, "polls/show.html", data)
def show(request):
# data = {}
# p = Poll.objects.all()
# data["polls"] = p
return render(request, "polls/show.html")
def searchShow(request):
    # always build a context and return a response, even when no
    # 'search' parameter was submitted
    context = {}
    if 'search' in request.GET:
        context["search_string"] = request.GET['search']
    return render(request, "polls/show.html", context)
|
flexible
|
{
"blob_id": "866ff68744a16158b7917ca6defc35440208ae71",
"index": 8575,
"step-1": "<mask token>\n\n\ndef index(request):\n data = {}\n return render(request, 'polls/index.html', data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n data = {}\n return render(request, 'polls/index.html', data)\n\n\n<mask token>\n\n\ndef searchShow(request):\n if 'search' in request.GET:\n search_string = request.GET['search']\n context = {'search_string': search_string}\n return render(request, 'polls/show.html', context)\n",
"step-3": "<mask token>\n\n\ndef index(request):\n data = {}\n return render(request, 'polls/index.html', data)\n\n\ndef show(request):\n return render(request, 'polls/show.html')\n\n\ndef searchShow(request):\n if 'search' in request.GET:\n search_string = request.GET['search']\n context = {'search_string': search_string}\n return render(request, 'polls/show.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom polls.models import Poll\nfrom .serializers import PollSerializer\n\n\ndef index(request):\n data = {}\n return render(request, 'polls/index.html', data)\n\n\ndef show(request):\n return render(request, 'polls/show.html')\n\n\ndef searchShow(request):\n if 'search' in request.GET:\n search_string = request.GET['search']\n context = {'search_string': search_string}\n return render(request, 'polls/show.html', context)\n",
"step-5": "from django.shortcuts import render\n\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom polls.models import Poll\nfrom .serializers import PollSerializer\n\n\n# class PollView(APIView):\n#\n# def get(self, request):\n# serializer = PollSerializer(Poll.objects.all(), many=True)\n# response = {\"polls\": serializer.data}\n# return Response(response, status=status.HTTP_200_OK)\n#\n# def post(self, request, format=None):\n# data = request.data\n# serializer = PollSerializer(data=data)\n# if serializer.is_valid():\n# poll = Poll(**data)\n# poll.save()\n# response = serializer.data\n# return Response(response, status=status.HTTP_200_OK)\n#\n#\ndef index(request):\n data = {}\n return render(request,\"polls/index.html\",data)\n#\n# def show(request):\n# data = {}\n# p = Poll.objects.all()\n# data[\"polls\"] = p\n# return render(request, \"polls/show.html\", data)\n\ndef show(request):\n # data = {}\n # p = Poll.objects.all()\n # data[\"polls\"] = p\n return render(request, \"polls/show.html\")\n\n\n\ndef searchShow(request):\n if 'search' in request.GET:\n search_string = request.GET['search']\n context = {\n \"search_string\": search_string,\n }\n return render(request, \"polls/show.html\", context)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Import
import sys
from .step import Step
from .repeat import Repeat
# Workout
class Workout(object):
def __init__(self):
self.workout = []
self.steps = []
self.postfixEnabled = True
# TODO: check that len(name) <= 6
def addStep(self, name, duration):
self.workout.append(Step(name, duration))
# TODO: check that len(name) <= 6 - len(count)
def addRepeat(self, names, durations, count):
self.workout.append(Repeat(names, durations, count))
def generateCode(self, filename=None):
# Open
        if filename is not None:
file = open(filename, 'w')
else:
file = sys.stdout
def wr(txt):
file.write(txt + '\n')
# Generate
wr('/* Reset */')
wr('if (SUUNTO_DURATION == 0) {')
wr(' STEP = 0;')
wr(' PREVSTEP = 0;')
wr(' STEPSTARTTIME = 0;')
wr(' STEPSTARTDIST = 0;')
wr(' STEPTIME = 0;')
wr(' STEPDIST = 0;')
wr('}')
wr('')
wr('/* Next step */')
wr('if (STEP != PREVSTEP) {')
wr(' Suunto.alarmBeep();')
wr(' STEPSTARTTIME = SUUNTO_DURATION;')
wr(' STEPSTARTDIST = SUUNTO_DISTANCE*1000;')
wr('}')
wr('')
wr('/* Update */')
wr('PREVSTEP = STEP;')
wr('STEPTIME = SUUNTO_DURATION - STEPSTARTTIME;')
wr('STEPDIST = SUUNTO_DISTANCE*1000 - STEPSTARTDIST;')
wr('')
step = 0
for w in self.workout:
step = w.generateCode(file,step,self.postfixEnabled)
wr('/* Check result */')
wr('if ( RESULT <= 0 ) {')
wr(' STEP = STEP + 1;')
wr(' RESULT = 0;')
wr('}')
# Close
        if filename is not None:
file.close()
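# Usage sketch (illustration only; the step names and durations are made up,
# and Step/Repeat are assumed to take the arguments shown above):
w = Workout()
w.addStep('WARMUP', 10 * 60)                   # 10 minute warm-up
w.addRepeat(['RUN', 'REST'], [120, 60], 5)     # 5 x (2 min run / 1 min rest)
w.addStep('COOL', 5 * 60)
w.generateCode('workout.txt')                  # omit the filename to print to stdout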
|
normal
|
{
"blob_id": "3f80c4c212259a8f3ff96bcc745fd28a85dac3ba",
"index": 8807,
"step-1": "<mask token>\n\n\nclass Workout(object):\n <mask token>\n <mask token>\n\n def addRepeat(self, names, durations, count):\n self.workout.append(Repeat(names, durations, count))\n\n def generateCode(self, filename=None):\n if not filename is None:\n file = open(filename, 'w')\n else:\n file = sys.stdout\n\n def wr(txt):\n file.write(txt + '\\n')\n wr('/* Reset */')\n wr('if (SUUNTO_DURATION == 0) {')\n wr(' STEP = 0;')\n wr(' PREVSTEP = 0;')\n wr(' STEPSTARTTIME = 0;')\n wr(' STEPSTARTDIST = 0;')\n wr(' STEPTIME = 0;')\n wr(' STEPDIST = 0;')\n wr('}')\n wr('')\n wr('/* Next step */')\n wr('if (STEP != PREVSTEP) {')\n wr(' Suunto.alarmBeep();')\n wr(' STEPSTARTTIME = SUUNTO_DURATION;')\n wr(' STEPSTARTDIST = SUUNTO_DISTANCE*1000;')\n wr('}')\n wr('')\n wr('/* Update */')\n wr('PREVSTEP = STEP;')\n wr('STEPTIME = SUUNTO_DURATION - STEPSTARTTIME;')\n wr('STEPDIST = SUUNTO_DISTANCE*1000 - STEPSTARTDIST;')\n wr('')\n step = 0\n for w in self.workout:\n step = w.generateCode(file, step, self.postfixEnabled)\n wr('/* Check result */')\n wr('if ( RESULT <= 0 ) {')\n wr(' STEP = STEP + 1;')\n wr(' RESULT = 0;')\n wr('}')\n if not filename is None:\n file.close()\n",
"step-2": "<mask token>\n\n\nclass Workout(object):\n <mask token>\n\n def addStep(self, name, duration):\n self.workout.append(Step(name, duration))\n\n def addRepeat(self, names, durations, count):\n self.workout.append(Repeat(names, durations, count))\n\n def generateCode(self, filename=None):\n if not filename is None:\n file = open(filename, 'w')\n else:\n file = sys.stdout\n\n def wr(txt):\n file.write(txt + '\\n')\n wr('/* Reset */')\n wr('if (SUUNTO_DURATION == 0) {')\n wr(' STEP = 0;')\n wr(' PREVSTEP = 0;')\n wr(' STEPSTARTTIME = 0;')\n wr(' STEPSTARTDIST = 0;')\n wr(' STEPTIME = 0;')\n wr(' STEPDIST = 0;')\n wr('}')\n wr('')\n wr('/* Next step */')\n wr('if (STEP != PREVSTEP) {')\n wr(' Suunto.alarmBeep();')\n wr(' STEPSTARTTIME = SUUNTO_DURATION;')\n wr(' STEPSTARTDIST = SUUNTO_DISTANCE*1000;')\n wr('}')\n wr('')\n wr('/* Update */')\n wr('PREVSTEP = STEP;')\n wr('STEPTIME = SUUNTO_DURATION - STEPSTARTTIME;')\n wr('STEPDIST = SUUNTO_DISTANCE*1000 - STEPSTARTDIST;')\n wr('')\n step = 0\n for w in self.workout:\n step = w.generateCode(file, step, self.postfixEnabled)\n wr('/* Check result */')\n wr('if ( RESULT <= 0 ) {')\n wr(' STEP = STEP + 1;')\n wr(' RESULT = 0;')\n wr('}')\n if not filename is None:\n file.close()\n",
"step-3": "<mask token>\n\n\nclass Workout(object):\n\n def __init__(self):\n self.workout = []\n self.steps = []\n self.postfixEnabled = True\n\n def addStep(self, name, duration):\n self.workout.append(Step(name, duration))\n\n def addRepeat(self, names, durations, count):\n self.workout.append(Repeat(names, durations, count))\n\n def generateCode(self, filename=None):\n if not filename is None:\n file = open(filename, 'w')\n else:\n file = sys.stdout\n\n def wr(txt):\n file.write(txt + '\\n')\n wr('/* Reset */')\n wr('if (SUUNTO_DURATION == 0) {')\n wr(' STEP = 0;')\n wr(' PREVSTEP = 0;')\n wr(' STEPSTARTTIME = 0;')\n wr(' STEPSTARTDIST = 0;')\n wr(' STEPTIME = 0;')\n wr(' STEPDIST = 0;')\n wr('}')\n wr('')\n wr('/* Next step */')\n wr('if (STEP != PREVSTEP) {')\n wr(' Suunto.alarmBeep();')\n wr(' STEPSTARTTIME = SUUNTO_DURATION;')\n wr(' STEPSTARTDIST = SUUNTO_DISTANCE*1000;')\n wr('}')\n wr('')\n wr('/* Update */')\n wr('PREVSTEP = STEP;')\n wr('STEPTIME = SUUNTO_DURATION - STEPSTARTTIME;')\n wr('STEPDIST = SUUNTO_DISTANCE*1000 - STEPSTARTDIST;')\n wr('')\n step = 0\n for w in self.workout:\n step = w.generateCode(file, step, self.postfixEnabled)\n wr('/* Check result */')\n wr('if ( RESULT <= 0 ) {')\n wr(' STEP = STEP + 1;')\n wr(' RESULT = 0;')\n wr('}')\n if not filename is None:\n file.close()\n",
"step-4": "import sys\nfrom .step import Step\nfrom .repeat import Repeat\n\n\nclass Workout(object):\n\n def __init__(self):\n self.workout = []\n self.steps = []\n self.postfixEnabled = True\n\n def addStep(self, name, duration):\n self.workout.append(Step(name, duration))\n\n def addRepeat(self, names, durations, count):\n self.workout.append(Repeat(names, durations, count))\n\n def generateCode(self, filename=None):\n if not filename is None:\n file = open(filename, 'w')\n else:\n file = sys.stdout\n\n def wr(txt):\n file.write(txt + '\\n')\n wr('/* Reset */')\n wr('if (SUUNTO_DURATION == 0) {')\n wr(' STEP = 0;')\n wr(' PREVSTEP = 0;')\n wr(' STEPSTARTTIME = 0;')\n wr(' STEPSTARTDIST = 0;')\n wr(' STEPTIME = 0;')\n wr(' STEPDIST = 0;')\n wr('}')\n wr('')\n wr('/* Next step */')\n wr('if (STEP != PREVSTEP) {')\n wr(' Suunto.alarmBeep();')\n wr(' STEPSTARTTIME = SUUNTO_DURATION;')\n wr(' STEPSTARTDIST = SUUNTO_DISTANCE*1000;')\n wr('}')\n wr('')\n wr('/* Update */')\n wr('PREVSTEP = STEP;')\n wr('STEPTIME = SUUNTO_DURATION - STEPSTARTTIME;')\n wr('STEPDIST = SUUNTO_DISTANCE*1000 - STEPSTARTDIST;')\n wr('')\n step = 0\n for w in self.workout:\n step = w.generateCode(file, step, self.postfixEnabled)\n wr('/* Check result */')\n wr('if ( RESULT <= 0 ) {')\n wr(' STEP = STEP + 1;')\n wr(' RESULT = 0;')\n wr('}')\n if not filename is None:\n file.close()\n",
"step-5": "# Import\nimport sys\nfrom .step import Step\nfrom .repeat import Repeat\n\n# Workout\nclass Workout(object):\n\n def __init__(self):\n self.workout = []\n self.steps = []\n self.postfixEnabled = True\n\n # TODO: check that len(name) <= 6\n def addStep(self, name, duration):\n self.workout.append(Step(name, duration))\n\n # TODO: check that len(name) <= 6 - len(count)\n def addRepeat(self, names, durations, count):\n self.workout.append(Repeat(names, durations, count))\n\n def generateCode(self, filename=None):\n\n # Open\n if not filename is None:\n file = open(filename, 'w')\n else:\n file = sys.stdout\n\n def wr(txt):\n file.write(txt + '\\n')\n\n # Generate\n wr('/* Reset */')\n wr('if (SUUNTO_DURATION == 0) {')\n wr(' STEP = 0;')\n wr(' PREVSTEP = 0;')\n wr(' STEPSTARTTIME = 0;')\n wr(' STEPSTARTDIST = 0;')\n wr(' STEPTIME = 0;')\n wr(' STEPDIST = 0;')\n wr('}')\n wr('')\n\n wr('/* Next step */')\n wr('if (STEP != PREVSTEP) {')\n wr(' Suunto.alarmBeep();')\n wr(' STEPSTARTTIME = SUUNTO_DURATION;')\n wr(' STEPSTARTDIST = SUUNTO_DISTANCE*1000;')\n wr('}')\n wr('')\n \n wr('/* Update */')\n wr('PREVSTEP = STEP;')\n wr('STEPTIME = SUUNTO_DURATION - STEPSTARTTIME;')\n wr('STEPDIST = SUUNTO_DISTANCE*1000 - STEPSTARTDIST;')\n wr('')\n\n step = 0\n for w in self.workout:\n step = w.generateCode(file,step,self.postfixEnabled)\n\n wr('/* Check result */')\n wr('if ( RESULT <= 0 ) {')\n wr(' STEP = STEP + 1;')\n wr(' RESULT = 0;')\n wr('}')\n\n # Close\n if not filename is None:\n file.close()\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['GSClient', 'GSPath']
<|reserved_special_token_1|>
from .gsclient import GSClient
from .gspath import GSPath
__all__ = ['GSClient', 'GSPath']
<|reserved_special_token_1|>
from .gsclient import GSClient
from .gspath import GSPath
__all__ = [
"GSClient",
"GSPath",
]
|
flexible
|
{
"blob_id": "7b726dd8ebbd5c49f9ce5bddb4779fcfbaaeb479",
"index": 5651,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['GSClient', 'GSPath']\n",
"step-3": "from .gsclient import GSClient\nfrom .gspath import GSPath\n__all__ = ['GSClient', 'GSPath']\n",
"step-4": "from .gsclient import GSClient\nfrom .gspath import GSPath\n\n__all__ = [\n \"GSClient\",\n \"GSPath\",\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from time import sleep
import RPi.GPIO as gpio
buzzer_pin = 18
gpio.setmode(gpio.BCM)
gpio.setup(buzzer_pin, gpio.OUT)
def buzz(pitch, duration):
    # one full square-wave period at the requested pitch (Hz)
    period = 1.0 / pitch
    delay = period / 2.0
    # number of on/off cycles needed to fill the requested duration (seconds)
    cycles = int(duration * pitch)
    for i in range(cycles):
        gpio.output(buzzer_pin, True)
        sleep(delay)
        gpio.output(buzzer_pin, False)
        sleep(delay)
pitch = float(1000)
duration = float(2)
buzz(pitch, duration)
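# Possible extension (a sketch, not part of the original script): sweep the
# pitch upward in steps, then release the GPIO pin.
for p in range(500, 2000, 250):
    buzz(float(p), 0.2)
gpio.cleanup()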
|
normal
|
{
"blob_id": "149ac778a552fac4499d7146db8600c91c68c60e",
"index": 4479,
"step-1": "<mask token>\n\n\ndef buzz(pitch, duration):\n peroid = 1.0 / pitch\n delay = peroid / 2.0\n cycles = int(duration * pitch)\n for i in range(cycles):\n gpio.output(buzzer_pin, True)\n sleep(delay)\n gpio.output(buzzer_pin, False)\n sleep(delay)\n\n\n<mask token>\n",
"step-2": "<mask token>\ngpio.setmode(gpio.BCM)\ngpio.setup(buzzer_pin, gpio.OUT)\n\n\ndef buzz(pitch, duration):\n peroid = 1.0 / pitch\n delay = peroid / 2.0\n cycles = int(duration * pitch)\n for i in range(cycles):\n gpio.output(buzzer_pin, True)\n sleep(delay)\n gpio.output(buzzer_pin, False)\n sleep(delay)\n\n\n<mask token>\nbuzz(pitch, duration)\n",
"step-3": "<mask token>\nbuzzer_pin = 18\ngpio.setmode(gpio.BCM)\ngpio.setup(buzzer_pin, gpio.OUT)\n\n\ndef buzz(pitch, duration):\n peroid = 1.0 / pitch\n delay = peroid / 2.0\n cycles = int(duration * pitch)\n for i in range(cycles):\n gpio.output(buzzer_pin, True)\n sleep(delay)\n gpio.output(buzzer_pin, False)\n sleep(delay)\n\n\npitch = float(1000)\nduration = float(2)\nbuzz(pitch, duration)\n",
"step-4": "from time import sleep\nimport RPi.GPIO as gpio\nbuzzer_pin = 18\ngpio.setmode(gpio.BCM)\ngpio.setup(buzzer_pin, gpio.OUT)\n\n\ndef buzz(pitch, duration):\n peroid = 1.0 / pitch\n delay = peroid / 2.0\n cycles = int(duration * pitch)\n for i in range(cycles):\n gpio.output(buzzer_pin, True)\n sleep(delay)\n gpio.output(buzzer_pin, False)\n sleep(delay)\n\n\npitch = float(1000)\nduration = float(2)\nbuzz(pitch, duration)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from datetime import datetime
from unittest import TestCase
from vpnmupd import versions
class TestClass01(TestCase):
"""Software dependency versions compared"""
def setUp(self) -> None:
super().setUp()
self.any_string = "Some string containing v1.1.1"
def test_case01(self):
"""Version extraction"""
version = versions.extract_version(self.any_string)
self.assertEqual(version, "1.1.1")
def test_case02(self):
"""Version power calculation"""
version = versions.get_version_power("1.1.1")
self.assertEqual(version, 111)
def test_case03(self):
"""Version power calculation compared"""
version1 = versions.get_version_power("1.1.1")
version2 = versions.get_version_power("0.2.1")
self.assertGreater(version1, version2)
def test_case04(self):
"""Datetime version"""
version = versions.get_version_power("2021.1.1")
self.assertTrue(isinstance(version, datetime))
def test_case05(self):
"""Datetime versions compare"""
version = versions.get_version_power("2020.1.1")
version2 = versions.get_version_power("2021.1.1")
self.assertGreater(version2, version)
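# A rough sketch of what the helpers under test might look like, inferred only
# from the assertions above; the real vpnmupd.versions implementation may differ.
import re
from datetime import datetime

def extract_version(text):
    # pull a dotted version such as "1.1.1" out of an arbitrary string
    match = re.search(r'(\d+(?:\.\d+)+)', text)
    return match.group(1) if match else None

def get_version_power(version):
    major, minor, patch = (int(part) for part in version.split('.'))
    if major >= 2000:
        # calendar-style versions compare as dates
        return datetime(major, minor, patch)
    return major * 100 + minor * 10 + patch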
|
normal
|
{
"blob_id": "21d2de5719fafd94605f31bc07231644f4be18c5",
"index": 8749,
"step-1": "<mask token>\n\n\nclass TestClass01(TestCase):\n <mask token>\n <mask token>\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, '1.1.1')\n <mask token>\n <mask token>\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power('2021.1.1')\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power('2020.1.1')\n version2 = versions.get_version_power('2021.1.1')\n self.assertGreater(version2, version)\n",
"step-2": "<mask token>\n\n\nclass TestClass01(TestCase):\n <mask token>\n <mask token>\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, '1.1.1')\n\n def test_case02(self):\n \"\"\"Version power calculation\"\"\"\n version = versions.get_version_power('1.1.1')\n self.assertEqual(version, 111)\n <mask token>\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power('2021.1.1')\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power('2020.1.1')\n version2 = versions.get_version_power('2021.1.1')\n self.assertGreater(version2, version)\n",
"step-3": "<mask token>\n\n\nclass TestClass01(TestCase):\n \"\"\"Software dependency versions compared\"\"\"\n\n def setUp(self) ->None:\n super().setUp()\n self.any_string = 'Some string containing v1.1.1'\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, '1.1.1')\n\n def test_case02(self):\n \"\"\"Version power calculation\"\"\"\n version = versions.get_version_power('1.1.1')\n self.assertEqual(version, 111)\n\n def test_case03(self):\n \"\"\"Version power calculation compared\"\"\"\n version1 = versions.get_version_power('1.1.1')\n version2 = versions.get_version_power('0.2.1')\n self.assertGreater(version1, version2)\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power('2021.1.1')\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power('2020.1.1')\n version2 = versions.get_version_power('2021.1.1')\n self.assertGreater(version2, version)\n",
"step-4": "from datetime import datetime\nfrom unittest import TestCase\nfrom vpnmupd import versions\n\n\nclass TestClass01(TestCase):\n \"\"\"Software dependency versions compared\"\"\"\n\n def setUp(self) ->None:\n super().setUp()\n self.any_string = 'Some string containing v1.1.1'\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, '1.1.1')\n\n def test_case02(self):\n \"\"\"Version power calculation\"\"\"\n version = versions.get_version_power('1.1.1')\n self.assertEqual(version, 111)\n\n def test_case03(self):\n \"\"\"Version power calculation compared\"\"\"\n version1 = versions.get_version_power('1.1.1')\n version2 = versions.get_version_power('0.2.1')\n self.assertGreater(version1, version2)\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power('2021.1.1')\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power('2020.1.1')\n version2 = versions.get_version_power('2021.1.1')\n self.assertGreater(version2, version)\n",
"step-5": "from datetime import datetime\nfrom unittest import TestCase\n\nfrom vpnmupd import versions\n\n\nclass TestClass01(TestCase):\n \"\"\"Software dependency versions compared\"\"\"\n\n def setUp(self) -> None:\n super().setUp()\n self.any_string = \"Some string containing v1.1.1\"\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, \"1.1.1\")\n\n def test_case02(self):\n \"\"\"Version power calculation\"\"\"\n version = versions.get_version_power(\"1.1.1\")\n self.assertEqual(version, 111)\n\n def test_case03(self):\n \"\"\"Version power calculation compared\"\"\"\n version1 = versions.get_version_power(\"1.1.1\")\n version2 = versions.get_version_power(\"0.2.1\")\n self.assertGreater(version1, version2)\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power(\"2021.1.1\")\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power(\"2020.1.1\")\n version2 = versions.get_version_power(\"2021.1.1\")\n self.assertGreater(version2, version)\n",
"step-ids": [
4,
5,
8,
9,
10
]
}
|
[
4,
5,
8,
9,
10
] |
<|reserved_special_token_0|>
class StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(prog='cli_helper.py',
description='Test argument parser.', add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
storage_format.StorageFormatArgumentsHelper.AddArguments(
argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.storage_format = 'sqlite'
options.task_storage_format = 'sqlite'
test_tool = tools.CLITool()
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
test_tool)
self.assertEqual(test_tool._storage_format, options.storage_format)
self.assertEqual(test_tool._task_storage_format, options.
task_storage_format)
with self.assertRaises(errors.BadConfigObject):
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
None)
with self.assertRaises(errors.BadConfigOption):
options.storage_format = 'bogus'
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
test_tool)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the storage format CLI arguments helper."""
_EXPECTED_OUTPUT = (
"""usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]
Test argument parser.
{0:s}:
--storage_format FORMAT, --storage-format FORMAT
Format of the storage file, the default is: sqlite.
Supported options: sqlite
--task_storage_format FORMAT, --task-storage-format FORMAT
Format for task storage, the default is: sqlite.
Supported options: redis, sqlite
"""
.format(cli_test_lib.ARGPARSE_OPTIONS))
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(prog='cli_helper.py',
description='Test argument parser.', add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
storage_format.StorageFormatArgumentsHelper.AddArguments(
argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.storage_format = 'sqlite'
options.task_storage_format = 'sqlite'
test_tool = tools.CLITool()
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
test_tool)
self.assertEqual(test_tool._storage_format, options.storage_format)
self.assertEqual(test_tool._task_storage_format, options.
task_storage_format)
with self.assertRaises(errors.BadConfigObject):
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
None)
with self.assertRaises(errors.BadConfigOption):
options.storage_format = 'bogus'
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
test_tool)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the storage format CLI arguments helper."""
_EXPECTED_OUTPUT = (
"""usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]
Test argument parser.
{0:s}:
--storage_format FORMAT, --storage-format FORMAT
Format of the storage file, the default is: sqlite.
Supported options: sqlite
--task_storage_format FORMAT, --task-storage-format FORMAT
Format for task storage, the default is: sqlite.
Supported options: redis, sqlite
"""
.format(cli_test_lib.ARGPARSE_OPTIONS))
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(prog='cli_helper.py',
description='Test argument parser.', add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
storage_format.StorageFormatArgumentsHelper.AddArguments(
argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.storage_format = 'sqlite'
options.task_storage_format = 'sqlite'
test_tool = tools.CLITool()
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
test_tool)
self.assertEqual(test_tool._storage_format, options.storage_format)
self.assertEqual(test_tool._task_storage_format, options.
task_storage_format)
with self.assertRaises(errors.BadConfigObject):
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
None)
with self.assertRaises(errors.BadConfigOption):
options.storage_format = 'bogus'
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
test_tool)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import storage_format
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the storage format CLI arguments helper."""
_EXPECTED_OUTPUT = (
"""usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]
Test argument parser.
{0:s}:
--storage_format FORMAT, --storage-format FORMAT
Format of the storage file, the default is: sqlite.
Supported options: sqlite
--task_storage_format FORMAT, --task-storage-format FORMAT
Format for task storage, the default is: sqlite.
Supported options: redis, sqlite
"""
.format(cli_test_lib.ARGPARSE_OPTIONS))
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(prog='cli_helper.py',
description='Test argument parser.', add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
storage_format.StorageFormatArgumentsHelper.AddArguments(
argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.storage_format = 'sqlite'
options.task_storage_format = 'sqlite'
test_tool = tools.CLITool()
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
test_tool)
self.assertEqual(test_tool._storage_format, options.storage_format)
self.assertEqual(test_tool._task_storage_format, options.
task_storage_format)
with self.assertRaises(errors.BadConfigObject):
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
None)
with self.assertRaises(errors.BadConfigOption):
options.storage_format = 'bogus'
storage_format.StorageFormatArgumentsHelper.ParseOptions(options,
test_tool)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the storage format CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import storage_format
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the storage format CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]
Test argument parser.
{0:s}:
--storage_format FORMAT, --storage-format FORMAT
Format of the storage file, the default is: sqlite.
Supported options: sqlite
--task_storage_format FORMAT, --task-storage-format FORMAT
Format for task storage, the default is: sqlite.
Supported options: redis, sqlite
""".format(cli_test_lib.ARGPARSE_OPTIONS)
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
storage_format.StorageFormatArgumentsHelper.AddArguments(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.storage_format = 'sqlite'
options.task_storage_format = 'sqlite'
test_tool = tools.CLITool()
storage_format.StorageFormatArgumentsHelper.ParseOptions(options, test_tool)
self.assertEqual(test_tool._storage_format, options.storage_format)
self.assertEqual(
test_tool._task_storage_format, options.task_storage_format)
with self.assertRaises(errors.BadConfigObject):
storage_format.StorageFormatArgumentsHelper.ParseOptions(options, None)
with self.assertRaises(errors.BadConfigOption):
options.storage_format = 'bogus'
storage_format.StorageFormatArgumentsHelper.ParseOptions(
options, test_tool)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "2075e7e05882524c295c8542ca7aefae2cf3e0fc",
"index": 5951,
"step-1": "<mask token>\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n <mask token>\n <mask token>\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(prog='cli_helper.py',\n description='Test argument parser.', add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n storage_format.StorageFormatArgumentsHelper.AddArguments(\n argument_parser)\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(test_tool._task_storage_format, options.\n task_storage_format)\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n None)\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n \"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n _EXPECTED_OUTPUT = (\n \"\"\"usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]\n\nTest argument parser.\n\n{0:s}:\n --storage_format FORMAT, --storage-format FORMAT\n Format of the storage file, the default is: sqlite.\n Supported options: sqlite\n --task_storage_format FORMAT, --task-storage-format FORMAT\n Format for task storage, the default is: sqlite.\n Supported options: redis, sqlite\n\"\"\"\n .format(cli_test_lib.ARGPARSE_OPTIONS))\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(prog='cli_helper.py',\n description='Test argument parser.', add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n storage_format.StorageFormatArgumentsHelper.AddArguments(\n argument_parser)\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(test_tool._task_storage_format, options.\n task_storage_format)\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n None)\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n \"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n _EXPECTED_OUTPUT = (\n \"\"\"usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]\n\nTest argument parser.\n\n{0:s}:\n --storage_format FORMAT, --storage-format FORMAT\n Format of the storage file, the default is: sqlite.\n Supported options: sqlite\n --task_storage_format FORMAT, --task-storage-format FORMAT\n Format for task storage, the default is: sqlite.\n Supported options: redis, sqlite\n\"\"\"\n .format(cli_test_lib.ARGPARSE_OPTIONS))\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(prog='cli_helper.py',\n description='Test argument parser.', add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n storage_format.StorageFormatArgumentsHelper.AddArguments(\n argument_parser)\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(test_tool._task_storage_format, options.\n task_storage_format)\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n None)\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport argparse\nimport unittest\nfrom plaso.cli import tools\nfrom plaso.cli.helpers import storage_format\nfrom plaso.lib import errors\nfrom tests.cli import test_lib as cli_test_lib\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n \"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n _EXPECTED_OUTPUT = (\n \"\"\"usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]\n\nTest argument parser.\n\n{0:s}:\n --storage_format FORMAT, --storage-format FORMAT\n Format of the storage file, the default is: sqlite.\n Supported options: sqlite\n --task_storage_format FORMAT, --task-storage-format FORMAT\n Format for task storage, the default is: sqlite.\n Supported options: redis, sqlite\n\"\"\"\n .format(cli_test_lib.ARGPARSE_OPTIONS))\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(prog='cli_helper.py',\n description='Test argument parser.', add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n storage_format.StorageFormatArgumentsHelper.AddArguments(\n argument_parser)\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(test_tool._task_storage_format, options.\n task_storage_format)\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n None)\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n\nimport argparse\nimport unittest\n\nfrom plaso.cli import tools\nfrom plaso.cli.helpers import storage_format\nfrom plaso.lib import errors\n\nfrom tests.cli import test_lib as cli_test_lib\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n \"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n\n # pylint: disable=no-member,protected-access\n\n _EXPECTED_OUTPUT = \"\"\"\\\nusage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]\n\nTest argument parser.\n\n{0:s}:\n --storage_format FORMAT, --storage-format FORMAT\n Format of the storage file, the default is: sqlite.\n Supported options: sqlite\n --task_storage_format FORMAT, --task-storage-format FORMAT\n Format for task storage, the default is: sqlite.\n Supported options: redis, sqlite\n\"\"\".format(cli_test_lib.ARGPARSE_OPTIONS)\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(\n prog='cli_helper.py', description='Test argument parser.',\n add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n\n storage_format.StorageFormatArgumentsHelper.AddArguments(argument_parser)\n\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options, test_tool)\n\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(\n test_tool._task_storage_format, options.task_storage_format)\n\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options, None)\n\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(\n options, test_tool)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def euro(number):
return f'{number:.2f} €'.replace('.', ',')
<|reserved_special_token_0|>
class Data:
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns
self.shape = 0, 0
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = len(data), len(data[0])
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
def write_csv(self, filename, decimal=',', sep=';', head=True):
with open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
def read_csv(self, filename, head=True, column_names=[], decimal=',',
parse_dates=[], date_parser=None):
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = len(file_data), len(file_data[0])
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif head and column_names:
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
self.columns = list(column_names)
for col in self.columns:
self.data[col] = []
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
self.data[self.columns[j]].append(col)
for col in self.columns:
setattr(self, col, self.data[col])
class Row:
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
def sort(self, by=None, reverse=False):
"""
sorts the rows
"by" has to be a column name
"""
temp_data = [list(row) for i, row in self.iterrows()]
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
def to_html(self, filename, format_values={}, rename_columns={}, css=[],
column_align={}, caption=None, format_columns={}):
"""
construct a html table out of this objects's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
"""
if len(self.data) == 0:
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' + '.center {text-align: center;}')
for style in css:
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class="{column_align[col]}">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def euro(number):
return f'{number:.2f} €'.replace('.', ',')
def date_s(date):
return str(date.strftime('%d.%m.%Y'))
def convert_to_date(date):
if type(date) == datetime.date:
return date
else:
return date.date()
class Data:
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns
self.shape = 0, 0
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = len(data), len(data[0])
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
def write_csv(self, filename, decimal=',', sep=';', head=True):
with open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
def read_csv(self, filename, head=True, column_names=[], decimal=',',
parse_dates=[], date_parser=None):
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = len(file_data), len(file_data[0])
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif head and column_names:
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
self.columns = list(column_names)
for col in self.columns:
self.data[col] = []
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
self.data[self.columns[j]].append(col)
for col in self.columns:
setattr(self, col, self.data[col])
class Row:
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
def sort(self, by=None, reverse=False):
"""
sorts the rows
"by" has to be a column name
"""
temp_data = [list(row) for i, row in self.iterrows()]
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
def to_html(self, filename, format_values={}, rename_columns={}, css=[],
column_align={}, caption=None, format_columns={}):
"""
construct a html table out of this objects's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
"""
if len(self.data) == 0:
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' + '.center {text-align: center;}')
for style in css:
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class="{column_align[col]}">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def euro(number):
return f'{number:.2f} €'.replace('.', ',')
def date_s(date):
return str(date.strftime('%d.%m.%Y'))
def convert_to_date(date):
if type(date) == datetime.date:
return date
else:
return date.date()
class Data:
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns
self.shape = 0, 0
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = len(data), len(data[0])
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
def write_csv(self, filename, decimal=',', sep=';', head=True):
with open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
def read_csv(self, filename, head=True, column_names=[], decimal=',',
parse_dates=[], date_parser=None):
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = len(file_data), len(file_data[0])
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif head and column_names:
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
self.columns = list(column_names)
for col in self.columns:
self.data[col] = []
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
self.data[self.columns[j]].append(col)
for col in self.columns:
setattr(self, col, self.data[col])
class Row:
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
def sort(self, by=None, reverse=False):
"""
sorts the rows
"by" has to be a column name
"""
temp_data = [list(row) for i, row in self.iterrows()]
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
def to_html(self, filename, format_values={}, rename_columns={}, css=[],
column_align={}, caption=None, format_columns={}):
"""
construct a html table out of this objects's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
"""
if len(self.data) == 0:
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' + '.center {text-align: center;}')
for style in css:
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class="{column_align[col]}">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
if __name__ == '__main__':
file_path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(file_path, 'exported_csv', 'staff.csv')
data = Data()
data.read_csv(filename, head=True, column_names=['A', 'B', 'C', 'D',
'E'], parse_dates=['date'], date_parser=lambda x: datetime.datetime
.strptime(x, '%d.%m.%Y').date())
table_css = ['table {border-collapse: collapse;}',
'table, th, td {border: 1px solid black;}',
'th, td {text-align: left; padding: 2px 6px 2px 6px;}']
data.to_html('temp/test.html', format_values={'payment': euro, 'date':
date_s}, format_columns={'payment': 'width=400px;'}, rename_columns
={'number': 'Number', 'name': 'Name', 'date': 'Date', 'payment':
'Payment'}, css=table_css, column_align={'payment': 'right'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import csv
import os
import datetime
def euro(number):
return f'{number:.2f} €'.replace('.', ',')
def date_s(date):
return str(date.strftime('%d.%m.%Y'))
def convert_to_date(date):
if type(date) == datetime.date:
return date
else:
return date.date()
class Data:
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns
self.shape = 0, 0
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = len(data), len(data[0])
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
def write_csv(self, filename, decimal=',', sep=';', head=True):
with open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
def read_csv(self, filename, head=True, column_names=[], decimal=',',
parse_dates=[], date_parser=None):
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = len(file_data), len(file_data[0])
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif head and column_names:
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
self.columns = list(column_names)
for col in self.columns:
self.data[col] = []
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
self.data[self.columns[j]].append(col)
for col in self.columns:
setattr(self, col, self.data[col])
class Row:
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
def sort(self, by=None, reverse=False):
"""
sorts the rows
"by" has to be a column name
"""
temp_data = [list(row) for i, row in self.iterrows()]
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
def to_html(self, filename, format_values={}, rename_columns={}, css=[],
column_align={}, caption=None, format_columns={}):
"""
        constructs an HTML table out of this object's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
"""
if len(self.data) == 0:
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' + '.center {text-align: center;}')
for style in css:
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class="{column_align[col]}">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
if __name__ == '__main__':
file_path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(file_path, 'exported_csv', 'staff.csv')
data = Data()
data.read_csv(filename, head=True, column_names=['A', 'B', 'C', 'D',
'E'], parse_dates=['date'], date_parser=lambda x: datetime.datetime
.strptime(x, '%d.%m.%Y').date())
table_css = ['table {border-collapse: collapse;}',
'table, th, td {border: 1px solid black;}',
'th, td {text-align: left; padding: 2px 6px 2px 6px;}']
data.to_html('temp/test.html', format_values={'payment': euro, 'date':
date_s}, format_columns={'payment': 'width=400px;'}, rename_columns
={'number': 'Number', 'name': 'Name', 'date': 'Date', 'payment':
'Payment'}, css=table_css, column_align={'payment': 'right'})
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 11:51:49 2019
@author: Christian Post
"""
# TODO: row index as an attribute of Data?
# make iterrows return a row object to access column names for each row
import csv
import os
import datetime
def euro(number):
return f'{number:.2f} €'.replace('.',',')
def date_s(date):
# accepts datetime, returns formatted string
return str(date.strftime("%d.%m.%Y"))
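# Examples (illustrative, not from the original file):
#   euro(1234.5)                      -> '1234,50 €'
#   date_s(datetime.date(2019, 7, 8)) -> '08.07.2019'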
def convert_to_date(date):
if type(date) == datetime.date:
return date
else:
return date.date()
class Data():
def __init__(self, data=None, columns=[]):
self.data = {}
self.columns = columns # column names
self.shape = (0, 0)
if data:
if columns:
for i in range(len(data[0])):
self.data[self.columns[i]] = []
else:
for i in range(len(data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(data):
for j, col in enumerate(row):
self.data[self.columns[j]].append(col)
self.shape = (len(data), len(data[0]))
print(self.data)
for col in self.columns:
setattr(self, col, self.data[col])
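    # Example (illustrative, not from the original file):
    #   Data([[1, 'Ann'], [2, 'Bob']], columns=['id', 'name'])
    # builds a two-row table; each column is then reachable as an attribute
    # (e.g. data.id) or via data.data['id'].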
def write_csv(self, filename, decimal=',', sep=';', head=True):
        # writes self.data to a given csv file
with open(filename, 'w+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep)
if head:
writer.writerow(self.columns)
for i, row in self.iterrows():
str_row = [str(r).replace('.', decimal) for r in row]
writer.writerow(str_row)
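    # Example (illustrative): data.write_csv('out.csv') writes the column names as a
    # header row, then one ';'-separated line per row with '.' replaced by ',' in every value.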
def read_csv(self, filename, head=True, column_names=[],
decimal=',', parse_dates=[], date_parser=None):
# make an array to store the csv data with shape (rows, columns)
if not os.path.isfile(filename):
print(f'Error: "{filename}" does not exist.')
return
file_data = []
try:
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
file_data.append(row)
except csv.Error:
print(f'Error: Could not read "{filename}"')
return
if len(file_data) == 0:
print(f'Error: "{filename}" does not contain any data.')
return
self.shape = (len(file_data), len(file_data[0]))
if column_names and len(column_names) != self.shape[1]:
print('Error: Mismatching length of column names ' +
f'(Got {len(column_names)} instead of {self.shape[1]}).')
return
if head and not column_names:
# set or store column names
self.columns = file_data[0]
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif head and column_names:
# TODO: check if len of column names is compatible
self.columns = list(column_names)
file_data = file_data[1:]
for col in self.columns:
self.data[col] = []
elif not head and column_names:
self.columns = list(column_names)
for col in self.columns:
self.data[col] = []
else:
for i in range(len(file_data[0])):
self.columns.append(str(i))
self.data[str(i)] = []
for i, row in enumerate(file_data):
for j, col in enumerate(row):
# check if data is boolean
if col == 'True':
self.data[self.columns[j]].append(True)
continue
elif col == 'False':
self.data[self.columns[j]].append(False)
continue
# check if data is date
if parse_dates and self.columns[j] in parse_dates:
self.data[self.columns[j]].append(date_parser(col))
continue
# convert numbers to float or int
value = col.replace(decimal, '.')
try:
value = float(value)
if value.is_integer():
self.data[self.columns[j]].append(int(value))
else:
self.data[self.columns[j]].append(value)
except ValueError:
# data is not a number
self.data[self.columns[j]].append(col)
# set attributes of data object based on column names
for col in self.columns:
setattr(self, col, self.data[col])
class Row():
def __init__(self, data, columns):
self.data = data
self.columns = columns
for i, col in enumerate(self.columns):
setattr(self, col, data[i])
def __getitem__(self, key):
return self.data[self.columns.index(key)]
def __iter__(self):
return iter(self.data)
def iterrows(self):
        # iterates over the rows
        # and yields the row index as well as a Row object
# TODO: maybe replace iterrows with this
v = list(self.data.values())
if len(v) == 0:
return
i = 0
while i < len(v[0]):
data = []
for col in v:
data.append(col[i])
row = self.Row(data, self.columns)
yield i, row
i += 1
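    # Example (illustrative; the column names are made up):
    #   for i, row in data.iterrows():
    #       print(i, row['name'], row.payment)
    # Row supports both item access by column name and attribute access.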
def sort(self, by=None, reverse=False):
'''
sorts the rows
"by" has to be a column name
'''
#temp_data = list(self.iterrows())
temp_data = [list(row) for i, row in self.iterrows()]
#print(temp_data)
if not by or by not in self.columns:
i = 0
else:
i = self.columns.index(by)
temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
# convert back to self.data structure
for i, row in enumerate(temp_data):
for j, col in enumerate(row):
self.data[self.columns[j]][i] = col
#return temp_data
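    # Example (illustrative): data.sort(by='payment', reverse=True) reorders self.data in
    # place from highest to lowest 'payment'; an unknown "by" falls back to column 0.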
def to_html(self, filename, format_values={}, rename_columns={},
css=[], column_align={}, caption=None,
format_columns={}):
'''
        constructs an HTML table out of this object's data
filename is a valid *.html or *.htm filename
format_values is a dictionary with column names as keys
and functions as values that take a single value as an argument
and return the formatted (or otherwise processed) value
rename_columns is a dictionary with pairs of
current col name: new col name
css is a list of css elements that are inserted into the
<style> tag
column_align is a dict with column name: align (left, right, center)
caption specifies the table's caption
format_columns is a dictionary with format options for the respective
columns
'''
if len(self.data) == 0:
# return if this has no data
print('HTML building aborted: No data')
return
if filename[-4:] != 'html' and filename[-3:] != 'htm':
print(f'Error: "{filename}" is not a valid html file')
return
strTable = '<html><head><style>'
# css table style
# add classes for alignment
strTable += ('.right {text-align: right;} ' +
'.left {text-align: left;} ' +
'.center {text-align: center;}')
for style in css:
# add css elements to style tag
strTable += style
strTable += '</style></head><body><table>'
if caption:
strTable += f'<caption>{caption}</caption>'
strTable += '<tr>'
for col in self.columns:
# add column names to table header
if col in rename_columns.keys():
col = rename_columns[col]
strTable += f'<th>{col}</th>'
strTable += '</tr>'
for i, row in self.iterrows():
# add rows to table
strRW = '<tr>'
for col in self.columns:
strTD = '<td '
value = row[col]
if col in format_values.keys():
value = format_values[col](value)
if col in format_columns.keys():
strTD += format_columns[col]
if col in column_align.keys():
strTD += f' class=\"{column_align[col]}\">{value}'
else:
strTD += f'>{value}'
strTD += '</td>'
strRW += strTD
strRW += '</tr>'
strTable += strRW
strTable += '</table></body></html>'
with open(filename, 'w') as html_file:
html_file.write(strTable)
if __name__ == '__main__':
file_path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(file_path, 'exported_csv', 'staff.csv')
data = Data()
data.read_csv(filename,
head=True,
column_names = ['A', 'B', 'C', 'D', 'E'],
parse_dates=['date'],
date_parser=lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').date())
table_css = [
'table {border-collapse: collapse;}',
'table, th, td {border: 1px solid black;}',
'th, td {text-align: left; padding: 2px 6px 2px 6px;}'
]
data.to_html('temp/test.html',
format_values={'payment': euro,
'date': date_s},
format_columns={'payment': 'width=400px;'},
rename_columns={'number': 'Number',
'name': 'Name',
'date': 'Date',
'payment': 'Payment'},
css=table_css,
column_align={'payment': 'right'})
#data.write_csv('test.csv')
|
flexible
|
{
"blob_id": "8db952ba5bf42443da89f4064caf012036471541",
"index": 2307,
"step-1": "<mask token>\n\n\ndef euro(number):\n return f'{number:.2f} €'.replace('.', ',')\n\n\n<mask token>\n\n\nclass Data:\n\n def __init__(self, data=None, columns=[]):\n self.data = {}\n self.columns = columns\n self.shape = 0, 0\n if data:\n if columns:\n for i in range(len(data[0])):\n self.data[self.columns[i]] = []\n else:\n for i in range(len(data[0])):\n self.columns.append(str(i))\n self.data[str(i)] = []\n for i, row in enumerate(data):\n for j, col in enumerate(row):\n self.data[self.columns[j]].append(col)\n self.shape = len(data), len(data[0])\n print(self.data)\n for col in self.columns:\n setattr(self, col, self.data[col])\n\n def write_csv(self, filename, decimal=',', sep=';', head=True):\n with open(filename, 'w+', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=sep)\n if head:\n writer.writerow(self.columns)\n for i, row in self.iterrows():\n str_row = [str(r).replace('.', decimal) for r in row]\n writer.writerow(str_row)\n\n def read_csv(self, filename, head=True, column_names=[], decimal=',',\n parse_dates=[], date_parser=None):\n if not os.path.isfile(filename):\n print(f'Error: \"{filename}\" does not exist.')\n return\n file_data = []\n try:\n with open(filename, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n for row in reader:\n file_data.append(row)\n except csv.Error:\n print(f'Error: Could not read \"{filename}\"')\n return\n if len(file_data) == 0:\n print(f'Error: \"{filename}\" does not contain any data.')\n return\n self.shape = len(file_data), len(file_data[0])\n if column_names and len(column_names) != self.shape[1]:\n print('Error: Mismatching length of column names ' +\n f'(Got {len(column_names)} instead of {self.shape[1]}).')\n return\n if head and not column_names:\n self.columns = file_data[0]\n file_data = file_data[1:]\n for col in self.columns:\n self.data[col] = []\n elif head and column_names:\n self.columns = list(column_names)\n file_data = file_data[1:]\n for col in self.columns:\n self.data[col] = []\n elif not head and column_names:\n self.columns = list(column_names)\n for col in self.columns:\n self.data[col] = []\n else:\n for i in range(len(file_data[0])):\n self.columns.append(str(i))\n self.data[str(i)] = []\n for i, row in enumerate(file_data):\n for j, col in enumerate(row):\n if col == 'True':\n self.data[self.columns[j]].append(True)\n continue\n elif col == 'False':\n self.data[self.columns[j]].append(False)\n continue\n if parse_dates and self.columns[j] in parse_dates:\n self.data[self.columns[j]].append(date_parser(col))\n continue\n value = col.replace(decimal, '.')\n try:\n value = float(value)\n if value.is_integer():\n self.data[self.columns[j]].append(int(value))\n else:\n self.data[self.columns[j]].append(value)\n except ValueError:\n self.data[self.columns[j]].append(col)\n for col in self.columns:\n setattr(self, col, self.data[col])\n\n\n class Row:\n\n def __init__(self, data, columns):\n self.data = data\n self.columns = columns\n for i, col in enumerate(self.columns):\n setattr(self, col, data[i])\n\n def __getitem__(self, key):\n return self.data[self.columns.index(key)]\n\n def __iter__(self):\n return iter(self.data)\n\n def iterrows(self):\n v = list(self.data.values())\n if len(v) == 0:\n return\n i = 0\n while i < len(v[0]):\n data = []\n for col in v:\n data.append(col[i])\n row = self.Row(data, self.columns)\n yield i, row\n i += 1\n\n def sort(self, by=None, reverse=False):\n \"\"\"\n sorts the rows\n \"by\" has to be a column name\n \"\"\"\n temp_data = [list(row) for 
i, row in self.iterrows()]\n if not by or by not in self.columns:\n i = 0\n else:\n i = self.columns.index(by)\n temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)\n for i, row in enumerate(temp_data):\n for j, col in enumerate(row):\n self.data[self.columns[j]][i] = col\n\n def to_html(self, filename, format_values={}, rename_columns={}, css=[],\n column_align={}, caption=None, format_columns={}):\n \"\"\"\n construct a html table out of this objects's data\n filename is a valid *.html or *.htm filename\n format_values is a dictionary with column names as keys\n and functions as values that take a single value as an argument\n and return the formatted (or otherwise processed) value\n rename_columns is a dictionary with pairs of\n current col name: new col name\n css is a list of css elements that are inserted into the\n <style> tag\n column_align is a dict with column name: align (left, right, center)\n caption specifies the table's caption\n format_columns is a dictionary with format options for the respective\n columns\n \"\"\"\n if len(self.data) == 0:\n print('HTML building aborted: No data')\n return\n if filename[-4:] != 'html' and filename[-3:] != 'htm':\n print(f'Error: \"{filename}\" is not a valid html file')\n return\n strTable = '<html><head><style>'\n strTable += ('.right {text-align: right;} ' +\n '.left {text-align: left;} ' + '.center {text-align: center;}')\n for style in css:\n strTable += style\n strTable += '</style></head><body><table>'\n if caption:\n strTable += f'<caption>{caption}</caption>'\n strTable += '<tr>'\n for col in self.columns:\n if col in rename_columns.keys():\n col = rename_columns[col]\n strTable += f'<th>{col}</th>'\n strTable += '</tr>'\n for i, row in self.iterrows():\n strRW = '<tr>'\n for col in self.columns:\n strTD = '<td '\n value = row[col]\n if col in format_values.keys():\n value = format_values[col](value)\n if col in format_columns.keys():\n strTD += format_columns[col]\n if col in column_align.keys():\n strTD += f' class=\"{column_align[col]}\">{value}'\n else:\n strTD += f'>{value}'\n strTD += '</td>'\n strRW += strTD\n strRW += '</tr>'\n strTable += strRW\n strTable += '</table></body></html>'\n with open(filename, 'w') as html_file:\n html_file.write(strTable)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef euro(number):\n return f'{number:.2f} €'.replace('.', ',')\n\n\ndef date_s(date):\n return str(date.strftime('%d.%m.%Y'))\n\n\ndef convert_to_date(date):\n if type(date) == datetime.date:\n return date\n else:\n return date.date()\n\n\nclass Data:\n\n def __init__(self, data=None, columns=[]):\n self.data = {}\n self.columns = columns\n self.shape = 0, 0\n if data:\n if columns:\n for i in range(len(data[0])):\n self.data[self.columns[i]] = []\n else:\n for i in range(len(data[0])):\n self.columns.append(str(i))\n self.data[str(i)] = []\n for i, row in enumerate(data):\n for j, col in enumerate(row):\n self.data[self.columns[j]].append(col)\n self.shape = len(data), len(data[0])\n print(self.data)\n for col in self.columns:\n setattr(self, col, self.data[col])\n\n def write_csv(self, filename, decimal=',', sep=';', head=True):\n with open(filename, 'w+', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=sep)\n if head:\n writer.writerow(self.columns)\n for i, row in self.iterrows():\n str_row = [str(r).replace('.', decimal) for r in row]\n writer.writerow(str_row)\n\n def read_csv(self, filename, head=True, column_names=[], decimal=',',\n parse_dates=[], date_parser=None):\n if not os.path.isfile(filename):\n print(f'Error: \"{filename}\" does not exist.')\n return\n file_data = []\n try:\n with open(filename, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n for row in reader:\n file_data.append(row)\n except csv.Error:\n print(f'Error: Could not read \"{filename}\"')\n return\n if len(file_data) == 0:\n print(f'Error: \"{filename}\" does not contain any data.')\n return\n self.shape = len(file_data), len(file_data[0])\n if column_names and len(column_names) != self.shape[1]:\n print('Error: Mismatching length of column names ' +\n f'(Got {len(column_names)} instead of {self.shape[1]}).')\n return\n if head and not column_names:\n self.columns = file_data[0]\n file_data = file_data[1:]\n for col in self.columns:\n self.data[col] = []\n elif head and column_names:\n self.columns = list(column_names)\n file_data = file_data[1:]\n for col in self.columns:\n self.data[col] = []\n elif not head and column_names:\n self.columns = list(column_names)\n for col in self.columns:\n self.data[col] = []\n else:\n for i in range(len(file_data[0])):\n self.columns.append(str(i))\n self.data[str(i)] = []\n for i, row in enumerate(file_data):\n for j, col in enumerate(row):\n if col == 'True':\n self.data[self.columns[j]].append(True)\n continue\n elif col == 'False':\n self.data[self.columns[j]].append(False)\n continue\n if parse_dates and self.columns[j] in parse_dates:\n self.data[self.columns[j]].append(date_parser(col))\n continue\n value = col.replace(decimal, '.')\n try:\n value = float(value)\n if value.is_integer():\n self.data[self.columns[j]].append(int(value))\n else:\n self.data[self.columns[j]].append(value)\n except ValueError:\n self.data[self.columns[j]].append(col)\n for col in self.columns:\n setattr(self, col, self.data[col])\n\n\n class Row:\n\n def __init__(self, data, columns):\n self.data = data\n self.columns = columns\n for i, col in enumerate(self.columns):\n setattr(self, col, data[i])\n\n def __getitem__(self, key):\n return self.data[self.columns.index(key)]\n\n def __iter__(self):\n return iter(self.data)\n\n def iterrows(self):\n v = list(self.data.values())\n if len(v) == 0:\n return\n i = 0\n while i < len(v[0]):\n data = []\n for col in v:\n data.append(col[i])\n row = self.Row(data, self.columns)\n yield i, 
row\n i += 1\n\n def sort(self, by=None, reverse=False):\n \"\"\"\n sorts the rows\n \"by\" has to be a column name\n \"\"\"\n temp_data = [list(row) for i, row in self.iterrows()]\n if not by or by not in self.columns:\n i = 0\n else:\n i = self.columns.index(by)\n temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)\n for i, row in enumerate(temp_data):\n for j, col in enumerate(row):\n self.data[self.columns[j]][i] = col\n\n def to_html(self, filename, format_values={}, rename_columns={}, css=[],\n column_align={}, caption=None, format_columns={}):\n \"\"\"\n construct a html table out of this objects's data\n filename is a valid *.html or *.htm filename\n format_values is a dictionary with column names as keys\n and functions as values that take a single value as an argument\n and return the formatted (or otherwise processed) value\n rename_columns is a dictionary with pairs of\n current col name: new col name\n css is a list of css elements that are inserted into the\n <style> tag\n column_align is a dict with column name: align (left, right, center)\n caption specifies the table's caption\n format_columns is a dictionary with format options for the respective\n columns\n \"\"\"\n if len(self.data) == 0:\n print('HTML building aborted: No data')\n return\n if filename[-4:] != 'html' and filename[-3:] != 'htm':\n print(f'Error: \"{filename}\" is not a valid html file')\n return\n strTable = '<html><head><style>'\n strTable += ('.right {text-align: right;} ' +\n '.left {text-align: left;} ' + '.center {text-align: center;}')\n for style in css:\n strTable += style\n strTable += '</style></head><body><table>'\n if caption:\n strTable += f'<caption>{caption}</caption>'\n strTable += '<tr>'\n for col in self.columns:\n if col in rename_columns.keys():\n col = rename_columns[col]\n strTable += f'<th>{col}</th>'\n strTable += '</tr>'\n for i, row in self.iterrows():\n strRW = '<tr>'\n for col in self.columns:\n strTD = '<td '\n value = row[col]\n if col in format_values.keys():\n value = format_values[col](value)\n if col in format_columns.keys():\n strTD += format_columns[col]\n if col in column_align.keys():\n strTD += f' class=\"{column_align[col]}\">{value}'\n else:\n strTD += f'>{value}'\n strTD += '</td>'\n strRW += strTD\n strRW += '</tr>'\n strTable += strRW\n strTable += '</table></body></html>'\n with open(filename, 'w') as html_file:\n html_file.write(strTable)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef euro(number):\n return f'{number:.2f} €'.replace('.', ',')\n\n\ndef date_s(date):\n return str(date.strftime('%d.%m.%Y'))\n\n\ndef convert_to_date(date):\n if type(date) == datetime.date:\n return date\n else:\n return date.date()\n\n\nclass Data:\n\n def __init__(self, data=None, columns=[]):\n self.data = {}\n self.columns = columns\n self.shape = 0, 0\n if data:\n if columns:\n for i in range(len(data[0])):\n self.data[self.columns[i]] = []\n else:\n for i in range(len(data[0])):\n self.columns.append(str(i))\n self.data[str(i)] = []\n for i, row in enumerate(data):\n for j, col in enumerate(row):\n self.data[self.columns[j]].append(col)\n self.shape = len(data), len(data[0])\n print(self.data)\n for col in self.columns:\n setattr(self, col, self.data[col])\n\n def write_csv(self, filename, decimal=',', sep=';', head=True):\n with open(filename, 'w+', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=sep)\n if head:\n writer.writerow(self.columns)\n for i, row in self.iterrows():\n str_row = [str(r).replace('.', decimal) for r in row]\n writer.writerow(str_row)\n\n def read_csv(self, filename, head=True, column_names=[], decimal=',',\n parse_dates=[], date_parser=None):\n if not os.path.isfile(filename):\n print(f'Error: \"{filename}\" does not exist.')\n return\n file_data = []\n try:\n with open(filename, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n for row in reader:\n file_data.append(row)\n except csv.Error:\n print(f'Error: Could not read \"{filename}\"')\n return\n if len(file_data) == 0:\n print(f'Error: \"{filename}\" does not contain any data.')\n return\n self.shape = len(file_data), len(file_data[0])\n if column_names and len(column_names) != self.shape[1]:\n print('Error: Mismatching length of column names ' +\n f'(Got {len(column_names)} instead of {self.shape[1]}).')\n return\n if head and not column_names:\n self.columns = file_data[0]\n file_data = file_data[1:]\n for col in self.columns:\n self.data[col] = []\n elif head and column_names:\n self.columns = list(column_names)\n file_data = file_data[1:]\n for col in self.columns:\n self.data[col] = []\n elif not head and column_names:\n self.columns = list(column_names)\n for col in self.columns:\n self.data[col] = []\n else:\n for i in range(len(file_data[0])):\n self.columns.append(str(i))\n self.data[str(i)] = []\n for i, row in enumerate(file_data):\n for j, col in enumerate(row):\n if col == 'True':\n self.data[self.columns[j]].append(True)\n continue\n elif col == 'False':\n self.data[self.columns[j]].append(False)\n continue\n if parse_dates and self.columns[j] in parse_dates:\n self.data[self.columns[j]].append(date_parser(col))\n continue\n value = col.replace(decimal, '.')\n try:\n value = float(value)\n if value.is_integer():\n self.data[self.columns[j]].append(int(value))\n else:\n self.data[self.columns[j]].append(value)\n except ValueError:\n self.data[self.columns[j]].append(col)\n for col in self.columns:\n setattr(self, col, self.data[col])\n\n\n class Row:\n\n def __init__(self, data, columns):\n self.data = data\n self.columns = columns\n for i, col in enumerate(self.columns):\n setattr(self, col, data[i])\n\n def __getitem__(self, key):\n return self.data[self.columns.index(key)]\n\n def __iter__(self):\n return iter(self.data)\n\n def iterrows(self):\n v = list(self.data.values())\n if len(v) == 0:\n return\n i = 0\n while i < len(v[0]):\n data = []\n for col in v:\n data.append(col[i])\n row = self.Row(data, self.columns)\n yield i, 
row\n i += 1\n\n def sort(self, by=None, reverse=False):\n \"\"\"\n sorts the rows\n \"by\" has to be a column name\n \"\"\"\n temp_data = [list(row) for i, row in self.iterrows()]\n if not by or by not in self.columns:\n i = 0\n else:\n i = self.columns.index(by)\n temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)\n for i, row in enumerate(temp_data):\n for j, col in enumerate(row):\n self.data[self.columns[j]][i] = col\n\n def to_html(self, filename, format_values={}, rename_columns={}, css=[],\n column_align={}, caption=None, format_columns={}):\n \"\"\"\n construct a html table out of this objects's data\n filename is a valid *.html or *.htm filename\n format_values is a dictionary with column names as keys\n and functions as values that take a single value as an argument\n and return the formatted (or otherwise processed) value\n rename_columns is a dictionary with pairs of\n current col name: new col name\n css is a list of css elements that are inserted into the\n <style> tag\n column_align is a dict with column name: align (left, right, center)\n caption specifies the table's caption\n format_columns is a dictionary with format options for the respective\n columns\n \"\"\"\n if len(self.data) == 0:\n print('HTML building aborted: No data')\n return\n if filename[-4:] != 'html' and filename[-3:] != 'htm':\n print(f'Error: \"{filename}\" is not a valid html file')\n return\n strTable = '<html><head><style>'\n strTable += ('.right {text-align: right;} ' +\n '.left {text-align: left;} ' + '.center {text-align: center;}')\n for style in css:\n strTable += style\n strTable += '</style></head><body><table>'\n if caption:\n strTable += f'<caption>{caption}</caption>'\n strTable += '<tr>'\n for col in self.columns:\n if col in rename_columns.keys():\n col = rename_columns[col]\n strTable += f'<th>{col}</th>'\n strTable += '</tr>'\n for i, row in self.iterrows():\n strRW = '<tr>'\n for col in self.columns:\n strTD = '<td '\n value = row[col]\n if col in format_values.keys():\n value = format_values[col](value)\n if col in format_columns.keys():\n strTD += format_columns[col]\n if col in column_align.keys():\n strTD += f' class=\"{column_align[col]}\">{value}'\n else:\n strTD += f'>{value}'\n strTD += '</td>'\n strRW += strTD\n strRW += '</tr>'\n strTable += strRW\n strTable += '</table></body></html>'\n with open(filename, 'w') as html_file:\n html_file.write(strTable)\n\n\nif __name__ == '__main__':\n file_path = os.path.dirname(os.path.abspath(__file__))\n filename = os.path.join(file_path, 'exported_csv', 'staff.csv')\n data = Data()\n data.read_csv(filename, head=True, column_names=['A', 'B', 'C', 'D',\n 'E'], parse_dates=['date'], date_parser=lambda x: datetime.datetime\n .strptime(x, '%d.%m.%Y').date())\n table_css = ['table {border-collapse: collapse;}',\n 'table, th, td {border: 1px solid black;}',\n 'th, td {text-align: left; padding: 2px 6px 2px 6px;}']\n data.to_html('temp/test.html', format_values={'payment': euro, 'date':\n date_s}, format_columns={'payment': 'width=400px;'}, rename_columns\n ={'number': 'Number', 'name': 'Name', 'date': 'Date', 'payment':\n 'Payment'}, css=table_css, column_align={'payment': 'right'})\n",
"step-4": "<mask token>\nimport csv\nimport os\nimport datetime\n\n\ndef euro(number):\n return f'{number:.2f} €'.replace('.', ',')\n\n\ndef date_s(date):\n return str(date.strftime('%d.%m.%Y'))\n\n\ndef convert_to_date(date):\n if type(date) == datetime.date:\n return date\n else:\n return date.date()\n\n\nclass Data:\n\n def __init__(self, data=None, columns=[]):\n self.data = {}\n self.columns = columns\n self.shape = 0, 0\n if data:\n if columns:\n for i in range(len(data[0])):\n self.data[self.columns[i]] = []\n else:\n for i in range(len(data[0])):\n self.columns.append(str(i))\n self.data[str(i)] = []\n for i, row in enumerate(data):\n for j, col in enumerate(row):\n self.data[self.columns[j]].append(col)\n self.shape = len(data), len(data[0])\n print(self.data)\n for col in self.columns:\n setattr(self, col, self.data[col])\n\n def write_csv(self, filename, decimal=',', sep=';', head=True):\n with open(filename, 'w+', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=sep)\n if head:\n writer.writerow(self.columns)\n for i, row in self.iterrows():\n str_row = [str(r).replace('.', decimal) for r in row]\n writer.writerow(str_row)\n\n def read_csv(self, filename, head=True, column_names=[], decimal=',',\n parse_dates=[], date_parser=None):\n if not os.path.isfile(filename):\n print(f'Error: \"{filename}\" does not exist.')\n return\n file_data = []\n try:\n with open(filename, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n for row in reader:\n file_data.append(row)\n except csv.Error:\n print(f'Error: Could not read \"{filename}\"')\n return\n if len(file_data) == 0:\n print(f'Error: \"{filename}\" does not contain any data.')\n return\n self.shape = len(file_data), len(file_data[0])\n if column_names and len(column_names) != self.shape[1]:\n print('Error: Mismatching length of column names ' +\n f'(Got {len(column_names)} instead of {self.shape[1]}).')\n return\n if head and not column_names:\n self.columns = file_data[0]\n file_data = file_data[1:]\n for col in self.columns:\n self.data[col] = []\n elif head and column_names:\n self.columns = list(column_names)\n file_data = file_data[1:]\n for col in self.columns:\n self.data[col] = []\n elif not head and column_names:\n self.columns = list(column_names)\n for col in self.columns:\n self.data[col] = []\n else:\n for i in range(len(file_data[0])):\n self.columns.append(str(i))\n self.data[str(i)] = []\n for i, row in enumerate(file_data):\n for j, col in enumerate(row):\n if col == 'True':\n self.data[self.columns[j]].append(True)\n continue\n elif col == 'False':\n self.data[self.columns[j]].append(False)\n continue\n if parse_dates and self.columns[j] in parse_dates:\n self.data[self.columns[j]].append(date_parser(col))\n continue\n value = col.replace(decimal, '.')\n try:\n value = float(value)\n if value.is_integer():\n self.data[self.columns[j]].append(int(value))\n else:\n self.data[self.columns[j]].append(value)\n except ValueError:\n self.data[self.columns[j]].append(col)\n for col in self.columns:\n setattr(self, col, self.data[col])\n\n\n class Row:\n\n def __init__(self, data, columns):\n self.data = data\n self.columns = columns\n for i, col in enumerate(self.columns):\n setattr(self, col, data[i])\n\n def __getitem__(self, key):\n return self.data[self.columns.index(key)]\n\n def __iter__(self):\n return iter(self.data)\n\n def iterrows(self):\n v = list(self.data.values())\n if len(v) == 0:\n return\n i = 0\n while i < len(v[0]):\n data = []\n for col in v:\n data.append(col[i])\n row = 
self.Row(data, self.columns)\n yield i, row\n i += 1\n\n def sort(self, by=None, reverse=False):\n \"\"\"\n sorts the rows\n \"by\" has to be a column name\n \"\"\"\n temp_data = [list(row) for i, row in self.iterrows()]\n if not by or by not in self.columns:\n i = 0\n else:\n i = self.columns.index(by)\n temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)\n for i, row in enumerate(temp_data):\n for j, col in enumerate(row):\n self.data[self.columns[j]][i] = col\n\n def to_html(self, filename, format_values={}, rename_columns={}, css=[],\n column_align={}, caption=None, format_columns={}):\n \"\"\"\n construct a html table out of this objects's data\n filename is a valid *.html or *.htm filename\n format_values is a dictionary with column names as keys\n and functions as values that take a single value as an argument\n and return the formatted (or otherwise processed) value\n rename_columns is a dictionary with pairs of\n current col name: new col name\n css is a list of css elements that are inserted into the\n <style> tag\n column_align is a dict with column name: align (left, right, center)\n caption specifies the table's caption\n format_columns is a dictionary with format options for the respective\n columns\n \"\"\"\n if len(self.data) == 0:\n print('HTML building aborted: No data')\n return\n if filename[-4:] != 'html' and filename[-3:] != 'htm':\n print(f'Error: \"{filename}\" is not a valid html file')\n return\n strTable = '<html><head><style>'\n strTable += ('.right {text-align: right;} ' +\n '.left {text-align: left;} ' + '.center {text-align: center;}')\n for style in css:\n strTable += style\n strTable += '</style></head><body><table>'\n if caption:\n strTable += f'<caption>{caption}</caption>'\n strTable += '<tr>'\n for col in self.columns:\n if col in rename_columns.keys():\n col = rename_columns[col]\n strTable += f'<th>{col}</th>'\n strTable += '</tr>'\n for i, row in self.iterrows():\n strRW = '<tr>'\n for col in self.columns:\n strTD = '<td '\n value = row[col]\n if col in format_values.keys():\n value = format_values[col](value)\n if col in format_columns.keys():\n strTD += format_columns[col]\n if col in column_align.keys():\n strTD += f' class=\"{column_align[col]}\">{value}'\n else:\n strTD += f'>{value}'\n strTD += '</td>'\n strRW += strTD\n strRW += '</tr>'\n strTable += strRW\n strTable += '</table></body></html>'\n with open(filename, 'w') as html_file:\n html_file.write(strTable)\n\n\nif __name__ == '__main__':\n file_path = os.path.dirname(os.path.abspath(__file__))\n filename = os.path.join(file_path, 'exported_csv', 'staff.csv')\n data = Data()\n data.read_csv(filename, head=True, column_names=['A', 'B', 'C', 'D',\n 'E'], parse_dates=['date'], date_parser=lambda x: datetime.datetime\n .strptime(x, '%d.%m.%Y').date())\n table_css = ['table {border-collapse: collapse;}',\n 'table, th, td {border: 1px solid black;}',\n 'th, td {text-align: left; padding: 2px 6px 2px 6px;}']\n data.to_html('temp/test.html', format_values={'payment': euro, 'date':\n date_s}, format_columns={'payment': 'width=400px;'}, rename_columns\n ={'number': 'Number', 'name': 'Name', 'date': 'Date', 'payment':\n 'Payment'}, css=table_css, column_align={'payment': 'right'})\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 8 11:51:49 2019\r\n\r\n@author: Christian Post\r\n\"\"\"\r\n# TODO: row index as an attribute of Data?\r\n# make iterrows return a row object to access column names for each row\r\n\r\n\r\nimport csv\r\nimport os\r\nimport datetime\r\n\r\n\r\ndef euro(number):\r\n return f'{number:.2f} €'.replace('.',',')\r\n\r\n\r\ndef date_s(date):\r\n # accepts datetime, returns formatted string\r\n return str(date.strftime(\"%d.%m.%Y\"))\r\n\r\n\r\ndef convert_to_date(date):\r\n if type(date) == datetime.date:\r\n return date\r\n else:\r\n return date.date()\r\n\r\n\r\n\r\nclass Data():\r\n def __init__(self, data=None, columns=[]):\r\n self.data = {}\r\n self.columns = columns # column names\r\n self.shape = (0, 0)\r\n if data:\r\n if columns:\r\n for i in range(len(data[0])):\r\n self.data[self.columns[i]] = []\r\n else:\r\n for i in range(len(data[0])):\r\n self.columns.append(str(i))\r\n self.data[str(i)] = []\r\n\r\n for i, row in enumerate(data):\r\n for j, col in enumerate(row):\r\n self.data[self.columns[j]].append(col)\r\n self.shape = (len(data), len(data[0]))\r\n print(self.data)\r\n for col in self.columns:\r\n setattr(self, col, self.data[col])\r\n \r\n\r\n def write_csv(self, filename, decimal=',', sep=';', head=True):\r\n # writes self.data to a give csv file\r\n with open(filename, 'w+', newline='') as csvfile:\r\n writer = csv.writer(csvfile, delimiter=sep)\r\n if head:\r\n writer.writerow(self.columns)\r\n for i, row in self.iterrows():\r\n str_row = [str(r).replace('.', decimal) for r in row]\r\n writer.writerow(str_row)\r\n\r\n\r\n def read_csv(self, filename, head=True, column_names=[],\r\n decimal=',', parse_dates=[], date_parser=None):\r\n # make an array to store the csv data with shape (rows, columns)\r\n if not os.path.isfile(filename):\r\n print(f'Error: \"{filename}\" does not exist.')\r\n return\r\n file_data = []\r\n try:\r\n with open(filename, 'r') as csvfile:\r\n reader = csv.reader(csvfile, delimiter=';')\r\n for row in reader:\r\n file_data.append(row)\r\n except csv.Error:\r\n print(f'Error: Could not read \"{filename}\"')\r\n return\r\n if len(file_data) == 0:\r\n print(f'Error: \"{filename}\" does not contain any data.')\r\n return\r\n \r\n self.shape = (len(file_data), len(file_data[0]))\r\n if column_names and len(column_names) != self.shape[1]:\r\n print('Error: Mismatching length of column names ' +\r\n f'(Got {len(column_names)} instead of {self.shape[1]}).')\r\n return\r\n \r\n if head and not column_names:\r\n # set or store column names\r\n self.columns = file_data[0]\r\n file_data = file_data[1:]\r\n for col in self.columns:\r\n self.data[col] = []\r\n elif head and column_names:\r\n # TODO: check if len of column names is compatible\r\n self.columns = list(column_names)\r\n file_data = file_data[1:]\r\n for col in self.columns:\r\n self.data[col] = []\r\n elif not head and column_names:\r\n self.columns = list(column_names)\r\n for col in self.columns:\r\n self.data[col] = []\r\n else:\r\n for i in range(len(file_data[0])):\r\n self.columns.append(str(i))\r\n self.data[str(i)] = []\r\n \r\n \r\n for i, row in enumerate(file_data):\r\n for j, col in enumerate(row):\r\n # check if data is boolean\r\n if col == 'True':\r\n self.data[self.columns[j]].append(True)\r\n continue\r\n elif col == 'False':\r\n self.data[self.columns[j]].append(False)\r\n continue\r\n \r\n # check if data is date\r\n if parse_dates and self.columns[j] in parse_dates:\r\n 
self.data[self.columns[j]].append(date_parser(col))\r\n continue\r\n \r\n # convert numbers to float or int\r\n value = col.replace(decimal, '.')\r\n try:\r\n value = float(value)\r\n if value.is_integer():\r\n self.data[self.columns[j]].append(int(value))\r\n else:\r\n self.data[self.columns[j]].append(value)\r\n except ValueError:\r\n # data is not a number\r\n self.data[self.columns[j]].append(col)\r\n # set attributes of data object based on column names\r\n for col in self.columns:\r\n setattr(self, col, self.data[col])\r\n \r\n \r\n class Row():\r\n def __init__(self, data, columns):\r\n self.data = data\r\n self.columns = columns\r\n for i, col in enumerate(self.columns):\r\n setattr(self, col, data[i])\r\n \r\n def __getitem__(self, key):\r\n return self.data[self.columns.index(key)]\r\n \r\n def __iter__(self):\r\n return iter(self.data)\r\n \r\n \r\n def iterrows(self):\r\n # similar to iterrows\r\n # but yields a row object as well as the index\r\n # TODO: maybe replace iterrows with this\r\n v = list(self.data.values())\r\n if len(v) == 0:\r\n return\r\n i = 0\r\n while i < len(v[0]):\r\n data = []\r\n for col in v:\r\n data.append(col[i])\r\n row = self.Row(data, self.columns)\r\n yield i, row\r\n i += 1\r\n \r\n \r\n def sort(self, by=None, reverse=False):\r\n '''\r\n sorts the rows\r\n \"by\" has to be a column name\r\n '''\r\n #temp_data = list(self.iterrows())\r\n temp_data = [list(row) for i, row in self.iterrows()]\r\n #print(temp_data)\r\n if not by or by not in self.columns:\r\n i = 0\r\n else:\r\n i = self.columns.index(by)\r\n temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)\r\n \r\n # convert back to self.data structure\r\n for i, row in enumerate(temp_data):\r\n for j, col in enumerate(row):\r\n self.data[self.columns[j]][i] = col\r\n \r\n #return temp_data\r\n \r\n \r\n def to_html(self, filename, format_values={}, rename_columns={},\r\n css=[], column_align={}, caption=None, \r\n format_columns={}):\r\n '''\r\n construct a html table out of this objects's data\r\n filename is a valid *.html or *.htm filename\r\n format_values is a dictionary with column names as keys\r\n and functions as values that take a single value as an argument\r\n and return the formatted (or otherwise processed) value\r\n rename_columns is a dictionary with pairs of\r\n current col name: new col name\r\n css is a list of css elements that are inserted into the\r\n <style> tag\r\n column_align is a dict with column name: align (left, right, center)\r\n caption specifies the table's caption\r\n format_columns is a dictionary with format options for the respective\r\n columns\r\n '''\r\n if len(self.data) == 0:\r\n # return if this has no data\r\n print('HTML building aborted: No data')\r\n return\r\n if filename[-4:] != 'html' and filename[-3:] != 'htm':\r\n print(f'Error: \"{filename}\" is not a valid html file')\r\n return\r\n strTable = '<html><head><style>'\r\n # css table style\r\n # add classes for alignment\r\n strTable += ('.right {text-align: right;} ' +\r\n '.left {text-align: left;} ' +\r\n '.center {text-align: center;}')\r\n \r\n for style in css:\r\n # add css elements to style tag\r\n strTable += style\r\n \r\n strTable += '</style></head><body><table>'\r\n if caption:\r\n strTable += f'<caption>{caption}</caption>'\r\n strTable += '<tr>'\r\n for col in self.columns:\r\n # add column names to table header\r\n if col in rename_columns.keys():\r\n col = rename_columns[col]\r\n strTable += f'<th>{col}</th>'\r\n strTable += '</tr>'\r\n \r\n for i, row in 
self.iterrows():\r\n # add rows to table\r\n strRW = '<tr>'\r\n for col in self.columns:\r\n strTD = '<td '\r\n value = row[col]\r\n if col in format_values.keys():\r\n value = format_values[col](value)\r\n if col in format_columns.keys():\r\n strTD += format_columns[col]\r\n if col in column_align.keys():\r\n strTD += f' class=\\\"{column_align[col]}\\\">{value}'\r\n else:\r\n strTD += f'>{value}'\r\n strTD += '</td>'\r\n strRW += strTD \r\n strRW += '</tr>'\r\n strTable += strRW\r\n strTable += '</table></body></html>'\r\n \r\n with open(filename, 'w') as html_file:\r\n html_file.write(strTable)\r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n file_path = os.path.dirname(os.path.abspath(__file__))\r\n filename = os.path.join(file_path, 'exported_csv', 'staff.csv')\r\n \r\n data = Data()\r\n data.read_csv(filename,\r\n head=True,\r\n column_names = ['A', 'B', 'C', 'D', 'E'],\r\n parse_dates=['date'],\r\n date_parser=lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').date())\r\n \r\n table_css = [\r\n 'table {border-collapse: collapse;}',\r\n 'table, th, td {border: 1px solid black;}',\r\n 'th, td {text-align: left; padding: 2px 6px 2px 6px;}'\r\n ]\r\n \r\n data.to_html('temp/test.html', \r\n format_values={'payment': euro,\r\n 'date': date_s},\r\n format_columns={'payment': 'width=400px;'},\r\n rename_columns={'number': 'Number', \r\n 'name': 'Name', \r\n 'date': 'Date',\r\n 'payment': 'Payment'},\r\n css=table_css,\r\n column_align={'payment': 'right'})\r\n \r\n #data.write_csv('test.csv')\r\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
from fractions import Fraction as f
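# The four non-trivial "digit-cancelling" fractions (cf. Project Euler 33); their product is 1/100.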
print(f(49, 98) * f(19, 95) * f(16, 64) * f(26, 65))
|
normal
|
{
"blob_id": "51b32972c97df50a45eb2b9ca58cdec0394e63ee",
"index": 3193,
"step-1": "from fractions import Fraction as f\n\nprint f(49,98) * f(19, 95) * f(16, 64) * f(26, 65)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
import time
import os
import csv
import matplotlib.pyplot as plt
from GELu import GELu
from My_Dataset import MyDataset
from pytorchtools import EarlyStopping
from LSTM import LSTM
'''
Written by KKL on 2020-12-1
This file is used to train the LSTM model
'''
def train_model(model, DEVICE, patience, n_epochs, csv_record=False):
train_losses = []
valid_losses = []
avg_train_losses = []
avg_valid_losses = []
# initialize the early_stopping object
early_stopping = EarlyStopping(patience=patience, verbose=True)
t1 = time.time()
for epoch in range(1, n_epochs + 1):
###################
# train the model #
###################
model.train() # prep model for training
for step, (feature, label) in enumerate(train_loader, 1):
feature = feature.to(DEVICE)
label = label.to(DEVICE).squeeze()
# print(feature.size(), label.size())
optimizer.zero_grad()
output = model(feature).squeeze()
# print(output.size(), label.size())
loss = loss_func(output, label)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
######################
        # validate the model #
######################
model.eval() # prep model for evaluation
with torch.no_grad():
for feature, label in valid_loader:
feature = feature.to(DEVICE)
label = label.to(DEVICE)
output = model(feature)
loss = loss_func(output.squeeze(), label.squeeze())
# record validation loss
valid_losses.append(loss.item())
# print training/validation statistics
# calculate average loss over an epoch
train_loss = np.average(train_losses)
valid_loss = np.average(valid_losses)
avg_train_losses.append(train_loss)
avg_valid_losses.append(valid_loss)
epoch_len = len(str(n_epochs))
print_msg = (f'[{epoch:>{epoch_len}}/{n_epochs:>{epoch_len}}] ' +
f'train_loss: {train_loss:.5f} ' +
f'valid_loss: {valid_loss:.5f}'+ f'| Using time: {time.time()-t1:.5f}')
t1 = time.time()
print(print_msg)
if csv_record==True:
with open(train_log_dir, "a", newline="") as train_log:
writer = csv.writer(train_log)
writer.writerow([epoch, train_loss])
with open(valid_log_dir, "a", newline="") as test_log:
writer = csv.writer(test_log)
writer.writerow([epoch, valid_loss])
# clear lists to track next epoch
train_losses = []
valid_losses = []
        # early_stopping needs the validation loss to check if it has decreased,
# and if it has, it will make a checkpoint of the current model
early_stopping(valid_loss, model)
if early_stopping.early_stop:
print("Early stopping")
break
# load the last checkpoint with the best model
model.load_state_dict(torch.load('checkpoint.pt'))
return model, avg_train_losses, avg_valid_losses
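# Usage sketch (illustrative; train_model relies on module-level train_loader, valid_loader,
# optimizer, loss_func and, when csv_record=True, train_log_dir/valid_log_dir, all of which
# are set up in the __main__ block below):
#   model, train_hist, valid_hist = train_model(net, DEVICE, patience=100, n_epochs=1000, csv_record=True)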
if __name__ == '__main__':
# Hyper Parameters
EPOCH = 1000
# BATCH_SIZE = 16
    BATCH_SIZE = 64 # try 16 later
LR = 0.001
patience = 100
csv_record = True
    # whether to use multiple GPUs
MultiGPU = False
torch.set_default_dtype(torch.float64)
# torch.backends.cudnn.enabled = False
print('Epoch = ', EPOCH, '|Batch size = ', BATCH_SIZE, '|Learning rate =', LR)
if MultiGPU:
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
torch.cuda.set_device(0)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('DEVICE=', DEVICE, "| PyTorch", torch.__version__, '| CUDA version ', torch.version.cuda, '| cudnn version', torch.backends.cudnn.version())
cPath = os.getcwd() # current path
    hdf5_dir = r'C:\Users\...\语音信号处理\data.hdf5'
train_data = MyDataset(hdf5_dir, 'train')
valid_data = MyDataset(hdf5_dir, 'valid')
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True,)
valid_loader = torch.utils.data.DataLoader(dataset=valid_data, batch_size=BATCH_SIZE, shuffle=False)
train_log_dir = os.path.join(r'C:\Users\...\语音信号处理\train_log.csv')
valid_log_dir = os.path.join(r'C:\Users\...\语音信号处理\valid_log.csv')
print('train data len:',train_data.__len__())
# log file
with open(train_log_dir, "w", newline="") as train_log:
writer = csv.writer(train_log)
writer.writerow(['epoch', 'loss'])
with open(valid_log_dir, "w", newline="") as valid_log:
writer = csv.writer(valid_log)
writer.writerow(['epoch', 'loss'])
net = LSTM().to(DEVICE)
print(net, '\n\n------------------training start-----------------')
# net.load_state_dict(torch.load('./workspace/'+model_name))
# optimizer = torch.optim.Adam(net.parameters(), lr=LR)
optimizer = torch.optim.Adam(net.parameters(), lr=LR, weight_decay=0.001)
loss_func = nn.MSELoss()
#--------------- training -----------------------
net, train_loss, valid_loss = train_model(net, DEVICE, patience, EPOCH, csv_record)
print('---------------result------')
print('train_loss:',train_loss[-1],'valid_loss:',valid_loss[-1])
torch.save(net.state_dict(), './VAD.pkl')
print('save model successfully')
|
normal
|
{
"blob_id": "80531ac3cc247d48ee36bff581925b8f29f9e235",
"index": 8590,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_model(model, DEVICE, patience, n_epochs, csv_record=False):\n train_losses = []\n valid_losses = []\n avg_train_losses = []\n avg_valid_losses = []\n early_stopping = EarlyStopping(patience=patience, verbose=True)\n t1 = time.time()\n for epoch in range(1, n_epochs + 1):\n model.train()\n for step, (feature, label) in enumerate(train_loader, 1):\n feature = feature.to(DEVICE)\n label = label.to(DEVICE).squeeze()\n optimizer.zero_grad()\n output = model(feature).squeeze()\n loss = loss_func(output, label)\n loss.backward()\n optimizer.step()\n train_losses.append(loss.item())\n model.eval()\n with torch.no_grad():\n for feature, label in valid_loader:\n feature = feature.to(DEVICE)\n label = label.to(DEVICE)\n output = model(feature)\n loss = loss_func(output.squeeze(), label.squeeze())\n valid_losses.append(loss.item())\n train_loss = np.average(train_losses)\n valid_loss = np.average(valid_losses)\n avg_train_losses.append(train_loss)\n avg_valid_losses.append(valid_loss)\n epoch_len = len(str(n_epochs))\n print_msg = (f'[{epoch:>{epoch_len}}/{n_epochs:>{epoch_len}}] ' +\n f'train_loss: {train_loss:.5f} ' +\n f'valid_loss: {valid_loss:.5f}' +\n f'| Using time: {time.time() - t1:.5f}')\n t1 = time.time()\n print(print_msg)\n if csv_record == True:\n with open(train_log_dir, 'a', newline='') as train_log:\n writer = csv.writer(train_log)\n writer.writerow([epoch, train_loss])\n with open(valid_log_dir, 'a', newline='') as test_log:\n writer = csv.writer(test_log)\n writer.writerow([epoch, valid_loss])\n train_losses = []\n valid_losses = []\n early_stopping(valid_loss, model)\n if early_stopping.early_stop:\n print('Early stopping')\n break\n model.load_state_dict(torch.load('checkpoint.pt'))\n return model, avg_train_losses, avg_valid_losses\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef train_model(model, DEVICE, patience, n_epochs, csv_record=False):\n train_losses = []\n valid_losses = []\n avg_train_losses = []\n avg_valid_losses = []\n early_stopping = EarlyStopping(patience=patience, verbose=True)\n t1 = time.time()\n for epoch in range(1, n_epochs + 1):\n model.train()\n for step, (feature, label) in enumerate(train_loader, 1):\n feature = feature.to(DEVICE)\n label = label.to(DEVICE).squeeze()\n optimizer.zero_grad()\n output = model(feature).squeeze()\n loss = loss_func(output, label)\n loss.backward()\n optimizer.step()\n train_losses.append(loss.item())\n model.eval()\n with torch.no_grad():\n for feature, label in valid_loader:\n feature = feature.to(DEVICE)\n label = label.to(DEVICE)\n output = model(feature)\n loss = loss_func(output.squeeze(), label.squeeze())\n valid_losses.append(loss.item())\n train_loss = np.average(train_losses)\n valid_loss = np.average(valid_losses)\n avg_train_losses.append(train_loss)\n avg_valid_losses.append(valid_loss)\n epoch_len = len(str(n_epochs))\n print_msg = (f'[{epoch:>{epoch_len}}/{n_epochs:>{epoch_len}}] ' +\n f'train_loss: {train_loss:.5f} ' +\n f'valid_loss: {valid_loss:.5f}' +\n f'| Using time: {time.time() - t1:.5f}')\n t1 = time.time()\n print(print_msg)\n if csv_record == True:\n with open(train_log_dir, 'a', newline='') as train_log:\n writer = csv.writer(train_log)\n writer.writerow([epoch, train_loss])\n with open(valid_log_dir, 'a', newline='') as test_log:\n writer = csv.writer(test_log)\n writer.writerow([epoch, valid_loss])\n train_losses = []\n valid_losses = []\n early_stopping(valid_loss, model)\n if early_stopping.early_stop:\n print('Early stopping')\n break\n model.load_state_dict(torch.load('checkpoint.pt'))\n return model, avg_train_losses, avg_valid_losses\n\n\nif __name__ == '__main__':\n EPOCH = 1000\n BATCH_SIZE = 64\n LR = 0.001\n patience = 100\n csv_record = True\n MultiGPU = False\n torch.set_default_dtype(torch.float64)\n print('Epoch = ', EPOCH, '|Batch size = ', BATCH_SIZE,\n '|Learning rate =', LR)\n if MultiGPU:\n os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n else:\n torch.cuda.set_device(0)\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('DEVICE=', DEVICE, '| PyTorch', torch.__version__,\n '| CUDA version ', torch.version.cuda, '| cudnn version', torch.\n backends.cudnn.version())\n cPath = os.getcwd()\n hdf5_dir = hdf5_dir = 'C:\\\\Users\\\\...\\\\语音信号处理\\\\data.hdf5'\n train_data = MyDataset(hdf5_dir, 'train')\n valid_data = MyDataset(hdf5_dir, 'valid')\n train_loader = torch.utils.data.DataLoader(dataset=train_data,\n batch_size=BATCH_SIZE, shuffle=True)\n valid_loader = torch.utils.data.DataLoader(dataset=valid_data,\n batch_size=BATCH_SIZE, shuffle=False)\n train_log_dir = os.path.join('C:\\\\Users\\\\...\\\\语音信号处理\\\\train_log.csv')\n valid_log_dir = os.path.join('C:\\\\Users\\\\...\\\\语音信号处理\\\\valid_log.csv')\n print('train data len:', train_data.__len__())\n with open(train_log_dir, 'w', newline='') as train_log:\n writer = csv.writer(train_log)\n writer.writerow(['epoch', 'loss'])\n with open(valid_log_dir, 'w', newline='') as valid_log:\n writer = csv.writer(valid_log)\n writer.writerow(['epoch', 'loss'])\n net = LSTM().to(DEVICE)\n print(net, '\\n\\n------------------training start-----------------')\n optimizer = torch.optim.Adam(net.parameters(), lr=LR, weight_decay=0.001)\n loss_func = nn.MSELoss()\n net, train_loss, valid_loss = 
train_model(net, DEVICE, patience, EPOCH,\n csv_record)\n print('---------------result------')\n print('train_loss:', train_loss[-1], 'valid_loss:', valid_loss[-1])\n torch.save(net.state_dict(), './VAD.pkl')\n print('save model successfully')\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nimport numpy as np\nimport time\nimport os\nimport csv\nimport matplotlib.pyplot as plt\nfrom GELu import GELu\nfrom My_Dataset import MyDataset\nfrom pytorchtools import EarlyStopping\nfrom LSTM import LSTM\n<mask token>\n\n\ndef train_model(model, DEVICE, patience, n_epochs, csv_record=False):\n train_losses = []\n valid_losses = []\n avg_train_losses = []\n avg_valid_losses = []\n early_stopping = EarlyStopping(patience=patience, verbose=True)\n t1 = time.time()\n for epoch in range(1, n_epochs + 1):\n model.train()\n for step, (feature, label) in enumerate(train_loader, 1):\n feature = feature.to(DEVICE)\n label = label.to(DEVICE).squeeze()\n optimizer.zero_grad()\n output = model(feature).squeeze()\n loss = loss_func(output, label)\n loss.backward()\n optimizer.step()\n train_losses.append(loss.item())\n model.eval()\n with torch.no_grad():\n for feature, label in valid_loader:\n feature = feature.to(DEVICE)\n label = label.to(DEVICE)\n output = model(feature)\n loss = loss_func(output.squeeze(), label.squeeze())\n valid_losses.append(loss.item())\n train_loss = np.average(train_losses)\n valid_loss = np.average(valid_losses)\n avg_train_losses.append(train_loss)\n avg_valid_losses.append(valid_loss)\n epoch_len = len(str(n_epochs))\n print_msg = (f'[{epoch:>{epoch_len}}/{n_epochs:>{epoch_len}}] ' +\n f'train_loss: {train_loss:.5f} ' +\n f'valid_loss: {valid_loss:.5f}' +\n f'| Using time: {time.time() - t1:.5f}')\n t1 = time.time()\n print(print_msg)\n if csv_record == True:\n with open(train_log_dir, 'a', newline='') as train_log:\n writer = csv.writer(train_log)\n writer.writerow([epoch, train_loss])\n with open(valid_log_dir, 'a', newline='') as test_log:\n writer = csv.writer(test_log)\n writer.writerow([epoch, valid_loss])\n train_losses = []\n valid_losses = []\n early_stopping(valid_loss, model)\n if early_stopping.early_stop:\n print('Early stopping')\n break\n model.load_state_dict(torch.load('checkpoint.pt'))\n return model, avg_train_losses, avg_valid_losses\n\n\nif __name__ == '__main__':\n EPOCH = 1000\n BATCH_SIZE = 64\n LR = 0.001\n patience = 100\n csv_record = True\n MultiGPU = False\n torch.set_default_dtype(torch.float64)\n print('Epoch = ', EPOCH, '|Batch size = ', BATCH_SIZE,\n '|Learning rate =', LR)\n if MultiGPU:\n os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n else:\n torch.cuda.set_device(0)\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('DEVICE=', DEVICE, '| PyTorch', torch.__version__,\n '| CUDA version ', torch.version.cuda, '| cudnn version', torch.\n backends.cudnn.version())\n cPath = os.getcwd()\n hdf5_dir = hdf5_dir = 'C:\\\\Users\\\\...\\\\语音信号处理\\\\data.hdf5'\n train_data = MyDataset(hdf5_dir, 'train')\n valid_data = MyDataset(hdf5_dir, 'valid')\n train_loader = torch.utils.data.DataLoader(dataset=train_data,\n batch_size=BATCH_SIZE, shuffle=True)\n valid_loader = torch.utils.data.DataLoader(dataset=valid_data,\n batch_size=BATCH_SIZE, shuffle=False)\n train_log_dir = os.path.join('C:\\\\Users\\\\...\\\\语音信号处理\\\\train_log.csv')\n valid_log_dir = os.path.join('C:\\\\Users\\\\...\\\\语音信号处理\\\\valid_log.csv')\n print('train data len:', train_data.__len__())\n with open(train_log_dir, 'w', newline='') as train_log:\n writer = csv.writer(train_log)\n writer.writerow(['epoch', 'loss'])\n with open(valid_log_dir, 'w', newline='') as 
valid_log:\n writer = csv.writer(valid_log)\n writer.writerow(['epoch', 'loss'])\n net = LSTM().to(DEVICE)\n print(net, '\\n\\n------------------training start-----------------')\n optimizer = torch.optim.Adam(net.parameters(), lr=LR, weight_decay=0.001)\n loss_func = nn.MSELoss()\n net, train_loss, valid_loss = train_model(net, DEVICE, patience, EPOCH,\n csv_record)\n print('---------------result------')\n print('train_loss:', train_loss[-1], 'valid_loss:', valid_loss[-1])\n torch.save(net.state_dict(), './VAD.pkl')\n print('save model successfully')\n",
"step-5": "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.nn.init as init\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom GELu import GELu\r\nfrom My_Dataset import MyDataset\r\nfrom pytorchtools import EarlyStopping\r\nfrom LSTM import LSTM\r\n\r\n'''\r\nWritten by KKL on 2020-12-1 \r\n\r\nThis file is used to train LSTM\r\n'''\r\n\r\n\r\ndef train_model(model, DEVICE, patience, n_epochs, csv_record=False):\r\n train_losses = []\r\n valid_losses = []\r\n avg_train_losses = []\r\n avg_valid_losses = []\r\n\r\n # initialize the early_stopping object\r\n early_stopping = EarlyStopping(patience=patience, verbose=True)\r\n\r\n t1 = time.time()\r\n for epoch in range(1, n_epochs + 1):\r\n ###################\r\n # train the model #\r\n ###################\r\n model.train() # prep model for training\r\n for step, (feature, label) in enumerate(train_loader, 1):\r\n feature = feature.to(DEVICE)\r\n label = label.to(DEVICE).squeeze()\r\n # print(feature.size(), label.size())\r\n\r\n optimizer.zero_grad()\r\n output = model(feature).squeeze()\r\n # print(output.size(), label.size())\r\n\r\n loss = loss_func(output, label)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n train_losses.append(loss.item())\r\n\r\n ######################\r\n # test the model #\r\n ######################\r\n model.eval() # prep model for evaluation\r\n with torch.no_grad():\r\n for feature, label in valid_loader:\r\n feature = feature.to(DEVICE)\r\n label = label.to(DEVICE)\r\n\r\n output = model(feature)\r\n loss = loss_func(output.squeeze(), label.squeeze())\r\n # record validation loss\r\n valid_losses.append(loss.item())\r\n\r\n # print training/validation statistics\r\n # calculate average loss over an epoch\r\n train_loss = np.average(train_losses)\r\n valid_loss = np.average(valid_losses)\r\n avg_train_losses.append(train_loss)\r\n avg_valid_losses.append(valid_loss)\r\n\r\n epoch_len = len(str(n_epochs))\r\n\r\n print_msg = (f'[{epoch:>{epoch_len}}/{n_epochs:>{epoch_len}}] ' +\r\n f'train_loss: {train_loss:.5f} ' +\r\n f'valid_loss: {valid_loss:.5f}'+ f'| Using time: {time.time()-t1:.5f}')\r\n t1 = time.time()\r\n\r\n print(print_msg)\r\n if csv_record==True:\r\n with open(train_log_dir, \"a\", newline=\"\") as train_log:\r\n writer = csv.writer(train_log)\r\n writer.writerow([epoch, train_loss])\r\n with open(valid_log_dir, \"a\", newline=\"\") as test_log:\r\n writer = csv.writer(test_log)\r\n writer.writerow([epoch, valid_loss])\r\n\r\n\r\n # clear lists to track next epoch\r\n train_losses = []\r\n valid_losses = []\r\n\r\n # early_stopping needs the validation loss to check if it has decresed,\r\n # and if it has, it will make a checkpoint of the current model\r\n early_stopping(valid_loss, model)\r\n\r\n if early_stopping.early_stop:\r\n print(\"Early stopping\")\r\n break\r\n # load the last checkpoint with the best model\r\n model.load_state_dict(torch.load('checkpoint.pt'))\r\n return model, avg_train_losses, avg_valid_losses\r\n\r\nif __name__ == '__main__':\r\n # Hyper Parameters\r\n EPOCH = 1000\r\n # BATCH_SIZE = 16\r\n BATCH_SIZE = 64 # 等下试试16\r\n LR = 0.001\r\n patience = 100\r\n csv_record = True\r\n\r\n # whether use multti GPUs\r\n MultiGPU = False\r\n torch.set_default_dtype(torch.float64)\r\n # torch.backends.cudnn.enabled = False\r\n print('Epoch = ', EPOCH, '|Batch size = ', BATCH_SIZE, '|Learning rate =', LR)\r\n if MultiGPU:\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\r\n 
DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n else:\r\n torch.cuda.set_device(0)\r\n DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n print('DEVICE=', DEVICE, \"| PyTorch\", torch.__version__, '| CUDA version ', torch.version.cuda, '| cudnn version', torch.backends.cudnn.version())\r\n\r\n\r\n\r\n cPath = os.getcwd() # current path\r\n hdf5_dir = hdf5_dir = r'C:\\Users\\...\\语音信号处理\\data.hdf5'\r\n train_data = MyDataset(hdf5_dir, 'train')\r\n valid_data = MyDataset(hdf5_dir, 'valid')\r\n train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True,)\r\n valid_loader = torch.utils.data.DataLoader(dataset=valid_data, batch_size=BATCH_SIZE, shuffle=False)\r\n train_log_dir = os.path.join(r'C:\\Users\\...\\语音信号处理\\train_log.csv')\r\n valid_log_dir = os.path.join(r'C:\\Users\\...\\语音信号处理\\valid_log.csv')\r\n print('train data len:',train_data.__len__())\r\n\r\n # log file\r\n with open(train_log_dir, \"w\", newline=\"\") as train_log:\r\n writer = csv.writer(train_log)\r\n writer.writerow(['epoch', 'loss'])\r\n with open(valid_log_dir, \"w\", newline=\"\") as valid_log:\r\n writer = csv.writer(valid_log)\r\n writer.writerow(['epoch', 'loss'])\r\n\r\n net = LSTM().to(DEVICE)\r\n print(net, '\\n\\n------------------training start-----------------')\r\n # net.load_state_dict(torch.load('./workspace/'+model_name))\r\n # optimizer = torch.optim.Adam(net.parameters(), lr=LR)\r\n optimizer = torch.optim.Adam(net.parameters(), lr=LR, weight_decay=0.001)\r\n loss_func = nn.MSELoss()\r\n\r\n #--------------- training -----------------------\r\n net, train_loss, valid_loss = train_model(net, DEVICE, patience, EPOCH, csv_record)\r\n print('---------------result------')\r\n print('train_loss:',train_loss[-1],'valid_loss:',valid_loss[-1])\r\n\r\n torch.save(net.state_dict(), './VAD.pkl')\r\n print('save model successfully')\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from compas.geometry import Frame
|
normal
|
{
"blob_id": "d4e3751b2d4796c72be497007fe4c7d8ca67e18e",
"index": 6874,
"step-1": "<mask token>\n",
"step-2": "from compas.geometry import Frame\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: NVLGPSStatus.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='NVLGPSStatus.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x12NVLGPSStatus.proto\"\x8d\x03\n\x0cNVLGPSStatus\x12\x12\n\ntracker_id\x18\x01 \x02(\x0c\x12\x12\n\ngps_active\x18\x02 \x02(\x08\x12\x10\n\x08\x64\x61te_day\x18\x03 \x01(\x05\x12\x12\n\ndate_month\x18\x04 \x01(\x05\x12\x11\n\tdate_year\x18\x05 \x01(\x05\x12\x12\n\ntime_hours\x18\x06 \x01(\x05\x12\x14\n\x0ctime_minutes\x18\x07 \x01(\x05\x12\x14\n\x0ctime_seconds\x18\x08 \x01(\x05\x12\x19\n\x11time_microseconds\x18\t \x01(\x05\x12\x10\n\x08latitude\x18\n \x01(\x01\x12\x11\n\tlongitude\x18\x0b \x01(\x01\x12\x1f\n\x17speed_over_ground_knots\x18\x0c \x01(\x02\x12\x1b\n\x13track_angle_degrees\x18\r \x01(\x02\x12\x1a\n\x12magnetic_variation\x18\x0e \x01(\x02\x12\x12\n\nfuel_level\x18\x0f \x01(\x05\x12\x15\n\rvoltage_level\x18\x10 \x01(\x02\x12\x17\n\x0fvehicle_running\x18\x11 \x01(\x08')
)
_NVLGPSSTATUS = _descriptor.Descriptor(
name='NVLGPSStatus',
full_name='NVLGPSStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tracker_id', full_name='NVLGPSStatus.tracker_id', index=0,
number=1, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gps_active', full_name='NVLGPSStatus.gps_active', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_day', full_name='NVLGPSStatus.date_day', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_month', full_name='NVLGPSStatus.date_month', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_year', full_name='NVLGPSStatus.date_year', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_hours', full_name='NVLGPSStatus.time_hours', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_minutes', full_name='NVLGPSStatus.time_minutes', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_seconds', full_name='NVLGPSStatus.time_seconds', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_microseconds', full_name='NVLGPSStatus.time_microseconds', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='latitude', full_name='NVLGPSStatus.latitude', index=9,
number=10, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='longitude', full_name='NVLGPSStatus.longitude', index=10,
number=11, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='speed_over_ground_knots', full_name='NVLGPSStatus.speed_over_ground_knots', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='track_angle_degrees', full_name='NVLGPSStatus.track_angle_degrees', index=12,
number=13, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='magnetic_variation', full_name='NVLGPSStatus.magnetic_variation', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fuel_level', full_name='NVLGPSStatus.fuel_level', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='voltage_level', full_name='NVLGPSStatus.voltage_level', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vehicle_running', full_name='NVLGPSStatus.vehicle_running', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=23,
serialized_end=420,
)
DESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (_message.Message,), dict(
DESCRIPTOR = _NVLGPSSTATUS,
__module__ = 'NVLGPSStatus_pb2'
# @@protoc_insertion_point(class_scope:NVLGPSStatus)
))
_sym_db.RegisterMessage(NVLGPSStatus)
# @@protoc_insertion_point(module_scope)
|
normal
|
{
"blob_id": "98d2196439a8dc3d511d176e61897aa67663a0b5",
"index": 4922,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n<mask token>\n_sym_db.RegisterMessage(NVLGPSStatus)\n",
"step-3": "<mask token>\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\n<mask token>\n_sym_db = _symbol_database.Default()\nDESCRIPTOR = _descriptor.FileDescriptor(name='NVLGPSStatus.proto', package=\n '', syntax='proto2', serialized_options=None, serialized_pb=_b(\n '\\n\\x12NVLGPSStatus.proto\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08date_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08'\n ))\n_NVLGPSSTATUS = _descriptor.Descriptor(name='NVLGPSStatus', full_name=\n 'NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None,\n fields=[_descriptor.FieldDescriptor(name='tracker_id', full_name=\n 'NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9,\n label=2, has_default_value=False, default_value=_b(''), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='gps_active', full_name=\n 'NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label\n =2, has_default_value=False, default_value=False, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='date_day', full_name=\n 'NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0, message_type=None, enum_type=\n None, containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3, number\n =4, type=5, cpp_type=1, label=1, has_default_value=False, default_value\n =0, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR), _descriptor.FieldDescriptor(name='date_year', full_name=\n 'NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=\n 1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_hours', full_name=\n 'NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label\n =1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_minutes', full_name=\n 'NVLGPSStatus.time_minutes', index=6, number=7, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, 
message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_seconds', full_name=\n 'NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_microseconds', full_name=\n 'NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='latitude', full_name=\n 'NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=\n 1, has_default_value=False, default_value=float(0), message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='longitude', full_name=\n 'NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='speed_over_ground_knots', full_name=\n 'NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='track_angle_degrees', full_name=\n 'NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='magnetic_variation', full_name=\n 'NVLGPSStatus.magnetic_variation', index=13, number=14, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='fuel_level', full_name=\n 'NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='voltage_level', full_name=\n 'NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='vehicle_running', full_name=\n 'NVLGPSStatus.vehicle_running', index=16, number=17, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, 
file=DESCRIPTOR)],\n extensions=[], nested_types=[], enum_types=[], serialized_options=None,\n is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[],\n serialized_start=23, serialized_end=420)\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (\n _message.Message,), dict(DESCRIPTOR=_NVLGPSSTATUS, __module__=\n 'NVLGPSStatus_pb2'))\n_sym_db.RegisterMessage(NVLGPSStatus)\n",
"step-4": "import sys\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n_sym_db = _symbol_database.Default()\nDESCRIPTOR = _descriptor.FileDescriptor(name='NVLGPSStatus.proto', package=\n '', syntax='proto2', serialized_options=None, serialized_pb=_b(\n '\\n\\x12NVLGPSStatus.proto\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08date_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08'\n ))\n_NVLGPSSTATUS = _descriptor.Descriptor(name='NVLGPSStatus', full_name=\n 'NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None,\n fields=[_descriptor.FieldDescriptor(name='tracker_id', full_name=\n 'NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9,\n label=2, has_default_value=False, default_value=_b(''), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='gps_active', full_name=\n 'NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label\n =2, has_default_value=False, default_value=False, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='date_day', full_name=\n 'NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0, message_type=None, enum_type=\n None, containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3, number\n =4, type=5, cpp_type=1, label=1, has_default_value=False, default_value\n =0, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR), _descriptor.FieldDescriptor(name='date_year', full_name=\n 'NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=\n 1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_hours', full_name=\n 'NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label\n =1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, 
serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_minutes', full_name=\n 'NVLGPSStatus.time_minutes', index=6, number=7, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_seconds', full_name=\n 'NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_microseconds', full_name=\n 'NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='latitude', full_name=\n 'NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=\n 1, has_default_value=False, default_value=float(0), message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='longitude', full_name=\n 'NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='speed_over_ground_knots', full_name=\n 'NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='track_angle_degrees', full_name=\n 'NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='magnetic_variation', full_name=\n 'NVLGPSStatus.magnetic_variation', index=13, number=14, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='fuel_level', full_name=\n 'NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='voltage_level', full_name=\n 'NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='vehicle_running', full_name=\n 'NVLGPSStatus.vehicle_running', 
index=16, number=17, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)],\n extensions=[], nested_types=[], enum_types=[], serialized_options=None,\n is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[],\n serialized_start=23, serialized_end=420)\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (\n _message.Message,), dict(DESCRIPTOR=_NVLGPSSTATUS, __module__=\n 'NVLGPSStatus_pb2'))\n_sym_db.RegisterMessage(NVLGPSStatus)\n",
"step-5": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: NVLGPSStatus.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='NVLGPSStatus.proto',\n package='',\n syntax='proto2',\n serialized_options=None,\n serialized_pb=_b('\\n\\x12NVLGPSStatus.proto\\\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08\\x64\\x61te_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08')\n)\n\n\n\n\n_NVLGPSSTATUS = _descriptor.Descriptor(\n name='NVLGPSStatus',\n full_name='NVLGPSStatus',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='tracker_id', full_name='NVLGPSStatus.tracker_id', index=0,\n number=1, type=12, cpp_type=9, label=2,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='gps_active', full_name='NVLGPSStatus.gps_active', index=1,\n number=2, type=8, cpp_type=7, label=2,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_day', full_name='NVLGPSStatus.date_day', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3,\n number=4, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_year', full_name='NVLGPSStatus.date_year', index=4,\n number=5, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_hours', full_name='NVLGPSStatus.time_hours', index=5,\n number=6, type=5, 
cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_minutes', full_name='NVLGPSStatus.time_minutes', index=6,\n number=7, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_seconds', full_name='NVLGPSStatus.time_seconds', index=7,\n number=8, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_microseconds', full_name='NVLGPSStatus.time_microseconds', index=8,\n number=9, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='latitude', full_name='NVLGPSStatus.latitude', index=9,\n number=10, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='longitude', full_name='NVLGPSStatus.longitude', index=10,\n number=11, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='speed_over_ground_knots', full_name='NVLGPSStatus.speed_over_ground_knots', index=11,\n number=12, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='track_angle_degrees', full_name='NVLGPSStatus.track_angle_degrees', index=12,\n number=13, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='magnetic_variation', full_name='NVLGPSStatus.magnetic_variation', index=13,\n number=14, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='fuel_level', full_name='NVLGPSStatus.fuel_level', index=14,\n number=15, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='voltage_level', full_name='NVLGPSStatus.voltage_level', index=15,\n number=16, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, 
containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='vehicle_running', full_name='NVLGPSStatus.vehicle_running', index=16,\n number=17, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=23,\n serialized_end=420,\n)\n\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (_message.Message,), dict(\n DESCRIPTOR = _NVLGPSSTATUS,\n __module__ = 'NVLGPSStatus_pb2'\n # @@protoc_insertion_point(class_scope:NVLGPSStatus)\n ))\n_sym_db.RegisterMessage(NVLGPSStatus)\n\n\n# @@protoc_insertion_point(module_scope)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import (
Model,
BaseManager,
UUIDField,
sane_repr,
)
class MonitorLocation(Model):
__core__ = True
guid = UUIDField(unique=True, auto_add=True)
name = models.CharField(max_length=128)
date_added = models.DateTimeField(default=timezone.now)
objects = BaseManager(cache_fields=('guid', ))
class Meta:
app_label = 'sentry'
db_table = 'sentry_monitorlocation'
__repr__ = sane_repr('guid', 'name')
|
normal
|
{
"blob_id": "1a4132358fa9bd4cd74970286ec8bb212b1857cd",
"index": 5247,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MonitorLocation(Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_monitorlocation'\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MonitorLocation(Model):\n __core__ = True\n guid = UUIDField(unique=True, auto_add=True)\n name = models.CharField(max_length=128)\n date_added = models.DateTimeField(default=timezone.now)\n objects = BaseManager(cache_fields=('guid',))\n\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_monitorlocation'\n __repr__ = sane_repr('guid', 'name')\n",
"step-4": "from __future__ import absolute_import, print_function\nfrom django.db import models\nfrom django.utils import timezone\nfrom sentry.db.models import Model, BaseManager, UUIDField, sane_repr\n\n\nclass MonitorLocation(Model):\n __core__ = True\n guid = UUIDField(unique=True, auto_add=True)\n name = models.CharField(max_length=128)\n date_added = models.DateTimeField(default=timezone.now)\n objects = BaseManager(cache_fields=('guid',))\n\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_monitorlocation'\n __repr__ = sane_repr('guid', 'name')\n",
"step-5": "from __future__ import absolute_import, print_function\n\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom sentry.db.models import (\n Model,\n BaseManager,\n UUIDField,\n sane_repr,\n)\n\n\nclass MonitorLocation(Model):\n __core__ = True\n\n guid = UUIDField(unique=True, auto_add=True)\n name = models.CharField(max_length=128)\n date_added = models.DateTimeField(default=timezone.now)\n objects = BaseManager(cache_fields=('guid', ))\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_monitorlocation'\n\n __repr__ = sane_repr('guid', 'name')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CommentForm(forms.Form):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CommentForm(forms.Form):
name = forms.CharField(label='称呼')
email = forms.EmailField(label='邮箱')
content = forms.CharField(label='内容')
<|reserved_special_token_1|>
from django import forms
class CommentForm(forms.Form):
name = forms.CharField(label='称呼')
email = forms.EmailField(label='邮箱')
content = forms.CharField(label='内容')
|
flexible
|
{
"blob_id": "c2ff3c5e44fa361671a3fdb38060517bcc4bc82c",
"index": 2778,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CommentForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CommentForm(forms.Form):\n name = forms.CharField(label='称呼')\n email = forms.EmailField(label='邮箱')\n content = forms.CharField(label='内容')\n",
"step-4": "from django import forms\n\n\nclass CommentForm(forms.Form):\n name = forms.CharField(label='称呼')\n email = forms.EmailField(label='邮箱')\n content = forms.CharField(label='内容')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import paho.mqtt.client as mqtt
from datetime import datetime
import ssl
from collections import OrderedDict
import time
from tkinter import *
import numpy as np
MQTT_IP = 'emq'
MQTT_PORT = 8883
username = "spread_ICAM"
password = "spread_ICAM"
deviceType = "spread_ICAM"
version = "v1"
def on_connect(client, userdata, flags, rc):
"""0: Connection successful
1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier
3: Connection refused - server unavailable
4: Connection refused - bad username or password
5: Connection refused - not authorised
6-255: Currently unused."""
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# If connection successful start publishing data
# if rc == 0:
# client.subscribe(subscribeTopic)
# self.__send_data_loop()
def on_message(client, userdata, msg):
print(str(datetime.now()) + " Message Received: " + str(msg.payload))
publishTopic = "%s_%s/%s/events" % (deviceType, version, username)
subscribeTopic = "%s_%s/%s/operations" % (deviceType, version, username)
# if the client_id is not set, the client fails to connect!!!!!
client = mqtt.Client(client_id="TentativoRaffo")
client.tls_set(ca_certs="digitalfuture_ca_public.pem", certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED,
tls_version=ssl.PROTOCOL_SSLv23, ciphers=None)
client.tls_insecure_set(False)
client.username_pw_set(username, password=password)
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_IP, MQTT_PORT, 60, bind_address="")
client.loop_start()
#########################
#
# CREATE THE GUI
#
#########################
root = Tk()
Label(root, text="Spread simulator").grid(row=0, column=1, pady=5)
Label(root, text="Kg").grid(row=1, column=0, pady=5)
text_id = Text(root, height=1, width=10)
text_id.grid(row=1, column=1, padx=5, pady=5)
Label(root, text="Peso in kg del vassoio prelevato (Kg)").grid(row=1, column=2, pady=5)
Label(root, text="mm_kg").grid(row=2, column=0, pady=5)
text_speed = Text(root, height=1, width=10)
text_speed.grid(row=2, column=1, padx=5, pady=5)
Label(root, text="Di quanti mm affonda per ogni kg prelevato (mm)").grid(row=2, column=2, pady=5)
Label(root, text="s").grid(row=3, column=0, pady=5)
text_speed = Text(root, height=1, width=10)
text_speed.grid(row=3, column=1, padx=5, pady=5)
Label(root, text="Coefficiente di sovraelongazione delle catene").grid(row=3, column=2, pady=5)
Label(root, text="interval").grid(row=4, column=0, pady=5)
text_speed = Text(root, height=1, width=10)
text_speed.grid(row=4, column=1, padx=5, pady=5)
Label(root, text="Intervallo di invio dati (s)").grid(row=4, column=2, pady=5)
btn_start = Button(root)
btn_start["text"] = "Start"
btn_start.grid(row=5, column=1, padx=5, pady=5)
btn_start = Button(root)
btn_start["text"] = "Stop"
btn_start.grid(row=6, column=1, padx=5, pady=5)
interval_time = 1000
def task():
spread = np.random.normal(loc=0.708727, scale=0.192176)
print("spread")
root.after(interval_time, task) # reschedule event in 2 seconds
root.after(interval_time, task)
root.mainloop()
root.destroy()
i = 0
timestamp = 1234567890123
while True:
time.sleep(1)
timestamp += i
print(timestamp)
ordered_obj_to_send = OrderedDict([
("spread", 3.0),
("timestamp_", timestamp),
("date", "eee")])
client.publish(publishTopic, json.dumps(ordered_obj_to_send), qos=2)
i+=1
#time.sleep(2)
|
normal
|
{
"blob_id": "f3664f5f69207c3f2dcec96c90cd220003da0904",
"index": 4142,
"step-1": "<mask token>\n\n\ndef on_connect(client, userdata, flags, rc):\n \"\"\"0: Connection successful\n 1: Connection refused - incorrect protocol version\n 2: Connection refused - invalid client identifier\n 3: Connection refused - server unavailable\n 4: Connection refused - bad username or password\n 5: Connection refused - not authorised\n 6-255: Currently unused.\"\"\"\n print('Connected with result code ' + str(rc))\n\n\ndef on_message(client, userdata, msg):\n print(str(datetime.now()) + ' Message Received: ' + str(msg.payload))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef on_connect(client, userdata, flags, rc):\n \"\"\"0: Connection successful\n 1: Connection refused - incorrect protocol version\n 2: Connection refused - invalid client identifier\n 3: Connection refused - server unavailable\n 4: Connection refused - bad username or password\n 5: Connection refused - not authorised\n 6-255: Currently unused.\"\"\"\n print('Connected with result code ' + str(rc))\n\n\ndef on_message(client, userdata, msg):\n print(str(datetime.now()) + ' Message Received: ' + str(msg.payload))\n\n\n<mask token>\n\n\ndef task():\n spread = np.random.normal(loc=0.708727, scale=0.192176)\n print('spread')\n root.after(interval_time, task)\n\n\n<mask token>\n",
"step-3": "<mask token>\nMQTT_IP = 'emq'\nMQTT_PORT = 8883\nusername = 'spread_ICAM'\npassword = 'spread_ICAM'\ndeviceType = 'spread_ICAM'\nversion = 'v1'\n\n\ndef on_connect(client, userdata, flags, rc):\n \"\"\"0: Connection successful\n 1: Connection refused - incorrect protocol version\n 2: Connection refused - invalid client identifier\n 3: Connection refused - server unavailable\n 4: Connection refused - bad username or password\n 5: Connection refused - not authorised\n 6-255: Currently unused.\"\"\"\n print('Connected with result code ' + str(rc))\n\n\ndef on_message(client, userdata, msg):\n print(str(datetime.now()) + ' Message Received: ' + str(msg.payload))\n\n\npublishTopic = '%s_%s/%s/events' % (deviceType, version, username)\nsubscribeTopic = '%s_%s/%s/operations' % (deviceType, version, username)\nclient = mqtt.Client(client_id='TentativoRaffo')\nclient.tls_set(ca_certs='digitalfuture_ca_public.pem', certfile=None,\n keyfile=None, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.\n PROTOCOL_SSLv23, ciphers=None)\nclient.tls_insecure_set(False)\nclient.username_pw_set(username, password=password)\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.connect(MQTT_IP, MQTT_PORT, 60, bind_address='')\nclient.loop_start()\nroot = Tk()\nLabel(root, text='Spread simulator').grid(row=0, column=1, pady=5)\nLabel(root, text='Kg').grid(row=1, column=0, pady=5)\ntext_id = Text(root, height=1, width=10)\ntext_id.grid(row=1, column=1, padx=5, pady=5)\nLabel(root, text='Peso in kg del vassoio prelevato (Kg)').grid(row=1,\n column=2, pady=5)\nLabel(root, text='mm_kg').grid(row=2, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=2, column=1, padx=5, pady=5)\nLabel(root, text='Di quanti mm affonda per ogni kg prelevato (mm)').grid(row\n =2, column=2, pady=5)\nLabel(root, text='s').grid(row=3, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=3, column=1, padx=5, pady=5)\nLabel(root, text='Coefficiente di sovraelongazione delle catene').grid(row=\n 3, column=2, pady=5)\nLabel(root, text='interval').grid(row=4, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=4, column=1, padx=5, pady=5)\nLabel(root, text='Intervallo di invio dati (s)').grid(row=4, column=2, pady=5)\nbtn_start = Button(root)\nbtn_start['text'] = 'Start'\nbtn_start.grid(row=5, column=1, padx=5, pady=5)\nbtn_start = Button(root)\nbtn_start['text'] = 'Stop'\nbtn_start.grid(row=6, column=1, padx=5, pady=5)\ninterval_time = 1000\n\n\ndef task():\n spread = np.random.normal(loc=0.708727, scale=0.192176)\n print('spread')\n root.after(interval_time, task)\n\n\nroot.after(interval_time, task)\nroot.mainloop()\nroot.destroy()\ni = 0\ntimestamp = 1234567890123\nwhile True:\n time.sleep(1)\n timestamp += i\n print(timestamp)\n ordered_obj_to_send = OrderedDict([('spread', 3.0), ('timestamp_',\n timestamp), ('date', 'eee')])\n client.publish(publishTopic, json.dumps(ordered_obj_to_send), qos=2)\n i += 1\n",
"step-4": "import json\nimport paho.mqtt.client as mqtt\nfrom datetime import datetime\nimport ssl\nfrom collections import OrderedDict\nimport time\nfrom tkinter import *\nimport numpy as np\nMQTT_IP = 'emq'\nMQTT_PORT = 8883\nusername = 'spread_ICAM'\npassword = 'spread_ICAM'\ndeviceType = 'spread_ICAM'\nversion = 'v1'\n\n\ndef on_connect(client, userdata, flags, rc):\n \"\"\"0: Connection successful\n 1: Connection refused - incorrect protocol version\n 2: Connection refused - invalid client identifier\n 3: Connection refused - server unavailable\n 4: Connection refused - bad username or password\n 5: Connection refused - not authorised\n 6-255: Currently unused.\"\"\"\n print('Connected with result code ' + str(rc))\n\n\ndef on_message(client, userdata, msg):\n print(str(datetime.now()) + ' Message Received: ' + str(msg.payload))\n\n\npublishTopic = '%s_%s/%s/events' % (deviceType, version, username)\nsubscribeTopic = '%s_%s/%s/operations' % (deviceType, version, username)\nclient = mqtt.Client(client_id='TentativoRaffo')\nclient.tls_set(ca_certs='digitalfuture_ca_public.pem', certfile=None,\n keyfile=None, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.\n PROTOCOL_SSLv23, ciphers=None)\nclient.tls_insecure_set(False)\nclient.username_pw_set(username, password=password)\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.connect(MQTT_IP, MQTT_PORT, 60, bind_address='')\nclient.loop_start()\nroot = Tk()\nLabel(root, text='Spread simulator').grid(row=0, column=1, pady=5)\nLabel(root, text='Kg').grid(row=1, column=0, pady=5)\ntext_id = Text(root, height=1, width=10)\ntext_id.grid(row=1, column=1, padx=5, pady=5)\nLabel(root, text='Peso in kg del vassoio prelevato (Kg)').grid(row=1,\n column=2, pady=5)\nLabel(root, text='mm_kg').grid(row=2, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=2, column=1, padx=5, pady=5)\nLabel(root, text='Di quanti mm affonda per ogni kg prelevato (mm)').grid(row\n =2, column=2, pady=5)\nLabel(root, text='s').grid(row=3, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=3, column=1, padx=5, pady=5)\nLabel(root, text='Coefficiente di sovraelongazione delle catene').grid(row=\n 3, column=2, pady=5)\nLabel(root, text='interval').grid(row=4, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=4, column=1, padx=5, pady=5)\nLabel(root, text='Intervallo di invio dati (s)').grid(row=4, column=2, pady=5)\nbtn_start = Button(root)\nbtn_start['text'] = 'Start'\nbtn_start.grid(row=5, column=1, padx=5, pady=5)\nbtn_start = Button(root)\nbtn_start['text'] = 'Stop'\nbtn_start.grid(row=6, column=1, padx=5, pady=5)\ninterval_time = 1000\n\n\ndef task():\n spread = np.random.normal(loc=0.708727, scale=0.192176)\n print('spread')\n root.after(interval_time, task)\n\n\nroot.after(interval_time, task)\nroot.mainloop()\nroot.destroy()\ni = 0\ntimestamp = 1234567890123\nwhile True:\n time.sleep(1)\n timestamp += i\n print(timestamp)\n ordered_obj_to_send = OrderedDict([('spread', 3.0), ('timestamp_',\n timestamp), ('date', 'eee')])\n client.publish(publishTopic, json.dumps(ordered_obj_to_send), qos=2)\n i += 1\n",
"step-5": "import json\nimport paho.mqtt.client as mqtt\nfrom datetime import datetime\nimport ssl\nfrom collections import OrderedDict\nimport time\nfrom tkinter import *\nimport numpy as np\n\nMQTT_IP = 'emq'\nMQTT_PORT = 8883\n\nusername = \"spread_ICAM\"\npassword = \"spread_ICAM\"\ndeviceType = \"spread_ICAM\"\nversion = \"v1\"\n\ndef on_connect(client, userdata, flags, rc):\n \"\"\"0: Connection successful\n 1: Connection refused - incorrect protocol version\n 2: Connection refused - invalid client identifier\n 3: Connection refused - server unavailable\n 4: Connection refused - bad username or password\n 5: Connection refused - not authorised\n 6-255: Currently unused.\"\"\"\n print(\"Connected with result code \" + str(rc))\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n # If connection successful start publishing data\n # if rc == 0:\n # client.subscribe(subscribeTopic)\n # self.__send_data_loop()\n\n\ndef on_message(client, userdata, msg):\n print(str(datetime.now()) + \" Message Received: \" + str(msg.payload))\n\n\npublishTopic = \"%s_%s/%s/events\" % (deviceType, version, username)\nsubscribeTopic = \"%s_%s/%s/operations\" % (deviceType, version, username)\n# se non imposto il client_id non riesce a connettersi!!!!!\nclient = mqtt.Client(client_id=\"TentativoRaffo\")\nclient.tls_set(ca_certs=\"digitalfuture_ca_public.pem\", certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED,\n tls_version=ssl.PROTOCOL_SSLv23, ciphers=None)\nclient.tls_insecure_set(False)\nclient.username_pw_set(username, password=password)\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.connect(MQTT_IP, MQTT_PORT, 60, bind_address=\"\")\nclient.loop_start()\n\n\n\n#########################\n#\n# CREATE THE GUI\n#\n#########################\n\n\nroot = Tk()\n\nLabel(root, text=\"Spread simulator\").grid(row=0, column=1, pady=5)\n\nLabel(root, text=\"Kg\").grid(row=1, column=0, pady=5)\ntext_id = Text(root, height=1, width=10)\ntext_id.grid(row=1, column=1, padx=5, pady=5)\nLabel(root, text=\"Peso in kg del vassoio prelevato (Kg)\").grid(row=1, column=2, pady=5)\n\n\nLabel(root, text=\"mm_kg\").grid(row=2, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=2, column=1, padx=5, pady=5)\nLabel(root, text=\"Di quanti mm affonda per ogni kg prelevato (mm)\").grid(row=2, column=2, pady=5)\n\nLabel(root, text=\"s\").grid(row=3, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=3, column=1, padx=5, pady=5)\nLabel(root, text=\"Coefficiente di sovraelongazione delle catene\").grid(row=3, column=2, pady=5)\n\nLabel(root, text=\"interval\").grid(row=4, column=0, pady=5)\ntext_speed = Text(root, height=1, width=10)\ntext_speed.grid(row=4, column=1, padx=5, pady=5)\nLabel(root, text=\"Intervallo di invio dati (s)\").grid(row=4, column=2, pady=5)\n\nbtn_start = Button(root)\nbtn_start[\"text\"] = \"Start\"\nbtn_start.grid(row=5, column=1, padx=5, pady=5)\n\nbtn_start = Button(root)\nbtn_start[\"text\"] = \"Stop\"\nbtn_start.grid(row=6, column=1, padx=5, pady=5)\n\ninterval_time = 1000;\n\ndef task():\n\n spread = np.random.normal(loc=0.708727, scale=0.192176)\n print(\"spread\")\n root.after(interval_time, task) # reschedule event in 2 seconds\n\nroot.after(interval_time, task)\n\nroot.mainloop()\nroot.destroy()\n\n\ni=0\ntimestamp = 1234567890123\nwhile(True):\n\n\n time.sleep(1)\n timestamp += i\n print(timestamp)\n\n ordered_obj_to_send = 
OrderedDict([\n (\"spread\", 3.0),\n (\"timestamp_\", timestamp),\n (\"date\", \"eee\")])\n client.publish(publishTopic, json.dumps(ordered_obj_to_send), qos=2)\n i+=1\n#time.sleep(2)",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
"""
Primos <generadores> 30 pts
Realice una generador que devuelva de todos lo numeros primos
existentes de 0 hasta n-1 que cumpla con el siguiente prototipo:
def gprimo(N):
pass
a = gprimo(10)
z = [e for e in a]
print(z)
# [2, 3 ,5 ,7 ]
"""
def gprimo(nmax):
	for x in range(2,nmax):
		for i in range(2,x):
			if x % i != 0:
				#i does not divide x, so x may still be prime
				continue
			else:
				#i divides x, so x is not prime
				break
		else:
			#the inner loop finished without breaking, so the number being checked is prime
			yield x
a = gprimo(10)
z =[e for e in a]
print(z)
"""
Bada Boom!!! <generadores> 20 pts
Defina un generador que reciba un numero entero positivo mayor a 0 N,
dicho generador proporciona numero de 1 hasta N
con las siguientes condiciones:
1) si es multiplo de 3 coloque la cadena "Bada"
2) si es multiplo de 5 coloque la cadena "Boom!!"
3) si es multiplo de 3 y 5 coloque "Bada Boom!!"
def genBadaBoom(N):
pass
a = genBadaBoom(10)
z = [e for e in a]
print(z)
#[1,2,"Bada",4,"Boom","Bada",7,8,"Bada","Boom"]
"""
def genBadaBoom(N):
if N > 0:
for i in range(1,N+1):
if(i % 3 == 0 and i % 5 == 0):
yield "Bada Boom!!"
elif(i % 3 == 0):
yield "Bada"
elif(i % 5 == 0):
yield "Boom!!"
else:
yield i
a = genBadaBoom(10)
z = [e for e in a]
print(z)
"""
Combinaciones <Comprensión de listas> 30pts
Una tienda de ropa quiere saber cuantos conjuntos se pueden crear
a partir de un grupo de 5 camisas (roja,negra,azul,morada y cafe),
4 pantalones (negro, azul, cafe obscuro y crema) y uno de 4 accesorios
posibles (cinturon, tirantes, lentes, fedora)
1) Obtenga una lista con todos los conjuntos posibles e imprimala en pantalla
2) imprima un mensaje donde mencione la cantidad de conjuntos posibles
"""
camisas = ["roja","negra","azul","morada","cafe"]
pantalones = ["negro", "azul", "cafe obscuro", "crema"]
accesorios = ["cinturon", "tirantes", "lentes", "fedora"]
combinaciones = [(x, y, z) for y in camisas for x in pantalones for z in accesorios]
print(combinaciones)
print("El número de combinaciones es:",len(combinaciones))
"""
¿Fedora? <Comprensión de listas > 15 pts
Del problema anterior imprima una lista que tenga todos los conjuntos
que incluyen un sombrero fedora y tambien despliegue su longitud
"""
combinacionesFedora = [(x, y, z) for (x,y,z) in combinaciones if z == 'fedora']
print(combinacionesFedora)
print("Número de combinaciones que incluyen sombrero fedora:",len(combinacionesFedora))
"""
<Monads> 30 pts
--Lacrimosa - Durch Nacht und Flut --
Die Suche endet jetzt und hier
Gestein kalt und nass
Granit in Deiner Brust
Der Stein der Dich zerdrückt
Der Fels der Dich umgibt
Aus dem gehauen Du doch bist
Despiertate te busco
Mi corazon abreté te libro
Elevate mi luz y prende mi llama
Si a ti, yo se, te encontrare
El fragmento anterior es un canción del duo lacrimosa
Usando Monads obtenga la letra
que menos se repite por cada linea y obtenga la probabilidad de sacar dicha
letra.
Nota: Pueden ayudarse de funciones recursivas y compresiones de lista.
"""
"""
<Monads>
--Hole in my soul apocalyptica-- 20 pts
El fragmento anterior es un canción del grupo apocalyptica
Usando Monads obtenga la letra
que menos se repite de todo el fragmento y obtenga la probabilidad de sacar dicha
letra.
Nota: Pueden ayudarse de funciones recursivas y compresiones de lista.
"""
cancion = """There's a hole in my heart, in my life, in my way
And it's filled with regret and all I did, to push you away
If there's still a place in your life, in your heart for me
I would do anything, so don't ask me to leave
I've got a hole in my soul where you use to be
You're the thorn in my heart and you're killing me
I wish I could go back and do it all differently
I wish that I'd treated you differently
'Cause now there's a hole in my soul where you use to be"""
cancion = list(cancion)  # turn the string into a list of characters
frecuenciaPalab = [cancion.count(w.casefold()) for w in cancion]  # count how often each character appears, repeats included
letra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab), cancion)  # keep the characters whose count equals the minimum frequency computed above
Y = list(letra)  # materialise the filter object as a list
Y = dict.fromkeys(Y).keys()  # drop duplicates, since dictionary keys cannot repeat
print(Y)
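# The exercise also asks for the probability of drawing that letter. A possible
# follow-up, reusing the variables above (taking len(cancion), the total number
# of characters, as the denominator is an assumption about how the probability
# is measured):
probabilidad = min(frecuenciaPalab) / len(cancion)
print(probabilidad)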
|
normal
|
{
"blob_id": "732886306d949c4059b08e1bc46de3ad95ba56cb",
"index": 1685,
"step-1": "<mask token>\n\n\ndef gprimo(nmax):\n for x in range(1, nmax):\n for i in range(2, x):\n if x % i != 0:\n continue\n else:\n break\n else:\n yield x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gprimo(nmax):\n for x in range(1, nmax):\n for i in range(2, x):\n if x % i != 0:\n continue\n else:\n break\n else:\n yield x\n\n\n<mask token>\n\n\ndef genBadaBoom(N):\n if N > 0:\n for i in range(1, N + 1):\n if i % 3 == 0 and i % 5 == 0:\n yield 'Bada Boom!!'\n elif i % 3 == 0:\n yield 'Bada'\n elif i % 5 == 0:\n yield 'Boom!!'\n else:\n yield i\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gprimo(nmax):\n for x in range(1, nmax):\n for i in range(2, x):\n if x % i != 0:\n continue\n else:\n break\n else:\n yield x\n\n\n<mask token>\nprint(z)\n<mask token>\n\n\ndef genBadaBoom(N):\n if N > 0:\n for i in range(1, N + 1):\n if i % 3 == 0 and i % 5 == 0:\n yield 'Bada Boom!!'\n elif i % 3 == 0:\n yield 'Bada'\n elif i % 5 == 0:\n yield 'Boom!!'\n else:\n yield i\n\n\n<mask token>\nprint(z)\n<mask token>\nprint(combinaciones)\nprint('El número de combinaciones es:', len(combinaciones))\n<mask token>\nprint(combinacionesFedora)\nprint('Número de combinaciones que incluyen sombrero fedora:', len(\n combinacionesFedora))\n<mask token>\nprint(Y)\n",
"step-4": "<mask token>\n\n\ndef gprimo(nmax):\n for x in range(1, nmax):\n for i in range(2, x):\n if x % i != 0:\n continue\n else:\n break\n else:\n yield x\n\n\na = gprimo(10)\nz = [e for e in a]\nprint(z)\n<mask token>\n\n\ndef genBadaBoom(N):\n if N > 0:\n for i in range(1, N + 1):\n if i % 3 == 0 and i % 5 == 0:\n yield 'Bada Boom!!'\n elif i % 3 == 0:\n yield 'Bada'\n elif i % 5 == 0:\n yield 'Boom!!'\n else:\n yield i\n\n\na = genBadaBoom(10)\nz = [e for e in a]\nprint(z)\n<mask token>\ncamisas = ['roja', 'negra', 'azul', 'morada', 'cafe']\npantalones = ['negro', 'azul', 'cafe obscuro', 'crema']\naccesorios = ['cinturon', 'tirantes', 'lentes', 'fedora']\ncombinaciones = [(x, y, z) for y in camisas for x in pantalones for z in\n accesorios]\nprint(combinaciones)\nprint('El número de combinaciones es:', len(combinaciones))\n<mask token>\ncombinacionesFedora = [(x, y, z) for x, y, z in combinaciones if z == 'fedora']\nprint(combinacionesFedora)\nprint('Número de combinaciones que incluyen sombrero fedora:', len(\n combinacionesFedora))\n<mask token>\ncancion = \"\"\"There's a hole in my heart, in my life, in my way\nAnd it's filled with regret and all I did, to push you away\nIf there's still a place in your life, in your heart for me\nI would do anything, so don't ask me to leave\n\nI've got a hole in my soul where you use to be\nYou're the thorn in my heart and you're killing me\nI wish I could go back and do it all differently\nI wish that I'd treated you differently\n'Cause now there's a hole in my soul where you use to be\"\"\"\ncancion = list(cancion)\nfrecuenciaPalab = [cancion.count(w.casefold()) for w in cancion]\nletra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab), cancion)\nY = list(letra)\nY = dict.fromkeys(Y).keys()\nprint(Y)\n",
"step-5": "\"\"\"\n\n Primos <generadores> 30 pts\n\n\tRealice una generador que devuelva de todos lo numeros primos\n\texistentes de 0 hasta n-1 que cumpla con el siguiente prototipo:\n\t\n\tdef gprimo(N):\n\t\tpass\n\t\n\t\n\ta = gprimo(10)\n\tz = [e for e in a]\n\tprint(z)\n\t# [2, 3 ,5 ,7 ]\n\"\"\"\n\ndef gprimo(nmax):\n\tfor x in range(1,nmax):\n\t\tfor i in range(2,x):\n\t\t\tif x % i != 0:\n\t\t\t\t#i no es divisor de x, x puede ser primo\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\t#i es divisor de x, x no es primo\n\t\t\t\tbreak\n\t\telse:\n\t\t\t#El bucle ha terminado con normalidad, el número que estabamos comprobando es primo\n\t\t\tyield x\n\na = gprimo(10)\nz =[e for e in a]\nprint(z)\n\n\n\"\"\"\nBada Boom!!! <generadores> 20 pts\n\t\n\tDefina un generador que reciba un numero entero positivo mayor a 0 N,\n\tdicho generador proporciona numero de 1 hasta N\n\tcon las siguientes condiciones:\n\t\t1) si es multiplo de 3 coloque la cadena \"Bada\"\n\t\t2) si es multiplo de 5 coloque la cadena \"Boom!!\"\n\t\t3) si es multiplo de 3 y 5 coloque \"Bada Boom!!\"\n\t\t\n\tdef genBadaBoom(N):\n\t\tpass\n\t\t\n\ta = genBadaBoom(10)\n\tz = [e for e in a]\n\tprint(z)\n\t#[1,2,\"Bada\",4,\"Boom\",\"Bada\",7,8,\"Bada\",\"Boom\"]\n\"\"\"\ndef genBadaBoom(N):\n\tif N > 0:\n\t\tfor i in range(1,N+1):\n\t\t\tif(i % 3 == 0 and i % 5 == 0):\n\t\t\t\tyield \"Bada Boom!!\"\n\t\t\telif(i % 3 == 0):\n\t\t\t\tyield \"Bada\"\n\t\t\telif(i % 5 == 0):\n\t\t\t\tyield \"Boom!!\"\n\t\t\telse:\n\t\t\t\tyield i\n\t\t\t\na = genBadaBoom(10)\nz = [e for e in a]\nprint(z)\n\n\"\"\"\n\n\nCombinaciones <Comprensión de listas> 30pts\n\n\tUna tienda de ropa quiere saber cuantos conjuntos se pueden crear \n\ta partir de un grupo de 5 camisas (roja,negra,azul,morada y cafe), \n\t4 pantalones (negro, azul, cafe obscuro y crema) y uno de 4 accesorios\n\tposibles (cinturon, tirantes, lentes, fedora)\n\t\n\t1) Obtenga una lista con todos los conjuntos posibles e imprimala en pantalla\n\t2) imprima un mensaje donde mencione la cantidad de conjuntos posibles\n\t\n\"\"\"\n\ncamisas = [\"roja\",\"negra\",\"azul\",\"morada\",\"cafe\"]\npantalones = [\"negro\", \"azul\", \"cafe obscuro\", \"crema\"]\naccesorios = [\"cinturon\", \"tirantes\", \"lentes\", \"fedora\"]\ncombinaciones = [(x, y, z) for y in camisas for x in pantalones for z in accesorios]\nprint(combinaciones)\nprint(\"El número de combinaciones es:\",len(combinaciones))\n\"\"\"\n \n¿Fedora? <Comprensión de listas > 15 pts\n\n\tDel problema anterior imprima una lista que tenga todos los conjuntos\n\tque incluyen un sombrero fedora y tambien despliegue su longitud\n\t\n\t\n\"\"\"\ncombinacionesFedora = [(x, y, z) for (x,y,z) in combinaciones if z == 'fedora']\nprint(combinacionesFedora)\nprint(\"Número de combinaciones que incluyen sombrero fedora:\",len(combinacionesFedora))\n\"\"\"\n<Monads> 30 pts\n\n--Lacrimosa - Durch Nacht und Flut -- \n\nDie Suche endet jetzt und hier\nGestein kalt und nass\nGranit in Deiner Brust\nDer Stein der Dich zerdrückt\nDer Fels der Dich umgibt\nAus dem gehauen Du doch bist\n\nDespiertate te busco\nMi corazon abreté te libro\nElevate mi luz y prende mi llama\nSi a ti, yo se, te encontrare\n\nEl fragmento anterior es un canción del duo lacrimosa\n\nUsando Monads obtenga la letra \nque menos se repite por cada linea y obtenga la probabilidad de sacar dicha\nletra.\n\nNota: Pueden ayudarse de funciones recursivas y compresiones de lista. 
\n\n\"\"\"\n\n\n\"\"\"\n<Monads>\n\n--Hole in my soul apocalyptica-- 20 pts\n\n\n\nEl fragmento anterior es un canción del grupo apocalyptica\n\nUsando Monads obtenga la letra \nque menos se repite de todo el fragmento y obtenga la probabilidad de sacar dicha\nletra.\n\nNota: Pueden ayudarse de funciones recursivas y compresiones de lista. \n\n\"\"\"\ncancion = \"\"\"There's a hole in my heart, in my life, in my way\nAnd it's filled with regret and all I did, to push you away\nIf there's still a place in your life, in your heart for me\nI would do anything, so don't ask me to leave\n\nI've got a hole in my soul where you use to be\nYou're the thorn in my heart and you're killing me\nI wish I could go back and do it all differently\nI wish that I'd treated you differently\n'Cause now there's a hole in my soul where you use to be\"\"\"\ncancion = list(cancion)#Lo hacemos una lista\nfrecuenciaPalab = [cancion.count(w.casefold()) for w in cancion] #contamos la frecuencia de cada letra sin importarnos si la letra se repite\nletra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab),cancion) #aplicamos un filtro a esa lista que nos devuela las letras que coinciden con el numero minimo en la frecuencia de letras que ya habiamos calculado\nY = list(letra)#Lo hacemos lista\nY = dict.fromkeys(Y).keys()#Para evitar valores duplicados que en un diccionario no se pueden duplicar los valores\nprint(Y)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Model(object):
def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,
keep_prob=0.5, scope='model'):
self._batch_size = batch_size
self._learning_rate = learning_rate
self._num_labels = num_labels
self._scope = scope
self._keep_prob = keep_prob
self._conv_hidden_dims = [192, 192]
with tf.variable_scope(self._scope):
self._build_model()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model(object):
def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,
keep_prob=0.5, scope='model'):
self._batch_size = batch_size
self._learning_rate = learning_rate
self._num_labels = num_labels
self._scope = scope
self._keep_prob = keep_prob
self._conv_hidden_dims = [192, 192]
with tf.variable_scope(self._scope):
self._build_model()
def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'
):
with tf.variable_scope(scope, reuse=reuse):
out = x
for i in range(len(self._conv_hidden_dims)):
out = layers.conv2d(out, num_outputs=self._conv_hidden_dims
[i], kernel_size=(5, 5), activation_fn=tf.nn.relu,
trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob,
is_training=trainable)
out = layers.max_pool2d(out, kernel_size=(2, 2))
out = layers.flatten(out)
out = layers.fully_connected(out, num_outputs=1000,
activation_fn=tf.nn.relu, trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob,
is_training=trainable)
logits = layers.fully_connected(out, self._num_labels,
trainable=trainable)
return logits
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model(object):
def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,
keep_prob=0.5, scope='model'):
self._batch_size = batch_size
self._learning_rate = learning_rate
self._num_labels = num_labels
self._scope = scope
self._keep_prob = keep_prob
self._conv_hidden_dims = [192, 192]
with tf.variable_scope(self._scope):
self._build_model()
def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'
):
with tf.variable_scope(scope, reuse=reuse):
out = x
for i in range(len(self._conv_hidden_dims)):
out = layers.conv2d(out, num_outputs=self._conv_hidden_dims
[i], kernel_size=(5, 5), activation_fn=tf.nn.relu,
trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob,
is_training=trainable)
out = layers.max_pool2d(out, kernel_size=(2, 2))
out = layers.flatten(out)
out = layers.fully_connected(out, num_outputs=1000,
activation_fn=tf.nn.relu, trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob,
is_training=trainable)
logits = layers.fully_connected(out, self._num_labels,
trainable=trainable)
return logits
def _build_model(self):
self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_')
x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x')
self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels],
name='y')
self.lr = tf.placeholder(tf.float32, shape=(), name='lr')
self.logits = self._build_net(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self
.logits, labels=self.y)
self.loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9,
use_nesterov=True)
self.train_op = optimizer.minimize(loss=self.loss)
self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1
), tf.argmax(self.y, 1)), dtype=tf.float32))
self.val_logits = self._build_net(x, reuse=True, trainable=False)
self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.
val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.acc)
self.merged = tf.summary.merge_all()
<|reserved_special_token_1|>
import tensorflow as tf
import numpy as np
import tensorflow.contrib.layers as layers
class Model(object):
def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,
keep_prob=0.5, scope='model'):
self._batch_size = batch_size
self._learning_rate = learning_rate
self._num_labels = num_labels
self._scope = scope
self._keep_prob = keep_prob
self._conv_hidden_dims = [192, 192]
with tf.variable_scope(self._scope):
self._build_model()
def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'
):
with tf.variable_scope(scope, reuse=reuse):
out = x
for i in range(len(self._conv_hidden_dims)):
out = layers.conv2d(out, num_outputs=self._conv_hidden_dims
[i], kernel_size=(5, 5), activation_fn=tf.nn.relu,
trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob,
is_training=trainable)
out = layers.max_pool2d(out, kernel_size=(2, 2))
out = layers.flatten(out)
out = layers.fully_connected(out, num_outputs=1000,
activation_fn=tf.nn.relu, trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob,
is_training=trainable)
logits = layers.fully_connected(out, self._num_labels,
trainable=trainable)
return logits
def _build_model(self):
self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_')
x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x')
self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels],
name='y')
self.lr = tf.placeholder(tf.float32, shape=(), name='lr')
self.logits = self._build_net(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self
.logits, labels=self.y)
self.loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9,
use_nesterov=True)
self.train_op = optimizer.minimize(loss=self.loss)
self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1
), tf.argmax(self.y, 1)), dtype=tf.float32))
self.val_logits = self._build_net(x, reuse=True, trainable=False)
self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.
val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.acc)
self.merged = tf.summary.merge_all()
<|reserved_special_token_1|>
import tensorflow as tf
import numpy as np
import tensorflow.contrib.layers as layers
class Model(object):
def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10, keep_prob=0.5, scope="model"):
self._batch_size = batch_size
self._learning_rate = learning_rate
self._num_labels = num_labels
self._scope = scope
self._keep_prob = keep_prob
self._conv_hidden_dims = [192, 192]
with tf.variable_scope(self._scope):
self._build_model()
def _build_net(self, x, reuse=False, trainable=True, scope="inference_net"):
with tf.variable_scope(scope, reuse=reuse):
out = x
for i in range(len(self._conv_hidden_dims)):
out = layers.conv2d(out, num_outputs=self._conv_hidden_dims[i], kernel_size=(5, 5),
activation_fn=tf.nn.relu, trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)
out = layers.max_pool2d(out, kernel_size=(2, 2))
out = layers.flatten(out)
out = layers.fully_connected(out, num_outputs=1000, activation_fn=tf.nn.relu, trainable=trainable)
out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)
logits = layers.fully_connected(out, self._num_labels, trainable=trainable)
return logits
def _build_model(self):
self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_') # data gets loaded as a 32x32 vector
x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x') # CIFAR dataset is shape 32,32,3
self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels], name='y') # 10 labels
# self.keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
self.lr = tf.placeholder(tf.float32, shape=(), name='lr')
self.logits = self._build_net(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)
self.loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9, use_nesterov=True)
self.train_op = optimizer.minimize(loss=self.loss)
self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))
# for eval steps
self.val_logits = self._build_net(x, reuse=True, trainable=False)
self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.acc)
self.merged = tf.summary.merge_all()
|
flexible
|
{
"blob_id": "e9a1fd8464f6c1e65aa2c1af60becbfcbf050814",
"index": 7390,
"step-1": "<mask token>\n\n\nclass Model(object):\n\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,\n keep_prob=0.5, scope='model'):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Model(object):\n\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,\n keep_prob=0.5, scope='model'):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n\n def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'\n ):\n with tf.variable_scope(scope, reuse=reuse):\n out = x\n for i in range(len(self._conv_hidden_dims)):\n out = layers.conv2d(out, num_outputs=self._conv_hidden_dims\n [i], kernel_size=(5, 5), activation_fn=tf.nn.relu,\n trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n out = layers.max_pool2d(out, kernel_size=(2, 2))\n out = layers.flatten(out)\n out = layers.fully_connected(out, num_outputs=1000,\n activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n logits = layers.fully_connected(out, self._num_labels,\n trainable=trainable)\n return logits\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Model(object):\n\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,\n keep_prob=0.5, scope='model'):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n\n def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'\n ):\n with tf.variable_scope(scope, reuse=reuse):\n out = x\n for i in range(len(self._conv_hidden_dims)):\n out = layers.conv2d(out, num_outputs=self._conv_hidden_dims\n [i], kernel_size=(5, 5), activation_fn=tf.nn.relu,\n trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n out = layers.max_pool2d(out, kernel_size=(2, 2))\n out = layers.flatten(out)\n out = layers.fully_connected(out, num_outputs=1000,\n activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n logits = layers.fully_connected(out, self._num_labels,\n trainable=trainable)\n return logits\n\n def _build_model(self):\n self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_')\n x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x')\n self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels],\n name='y')\n self.lr = tf.placeholder(tf.float32, shape=(), name='lr')\n self.logits = self._build_net(x)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self\n .logits, labels=self.y)\n self.loss = tf.reduce_mean(cross_entropy)\n optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9,\n use_nesterov=True)\n self.train_op = optimizer.minimize(loss=self.loss)\n self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1\n ), tf.argmax(self.y, 1)), dtype=tf.float32))\n self.val_logits = self._build_net(x, reuse=True, trainable=False)\n self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.\n val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('accuracy', self.acc)\n self.merged = tf.summary.merge_all()\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport tensorflow.contrib.layers as layers\n\n\nclass Model(object):\n\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10,\n keep_prob=0.5, scope='model'):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n\n def _build_net(self, x, reuse=False, trainable=True, scope='inference_net'\n ):\n with tf.variable_scope(scope, reuse=reuse):\n out = x\n for i in range(len(self._conv_hidden_dims)):\n out = layers.conv2d(out, num_outputs=self._conv_hidden_dims\n [i], kernel_size=(5, 5), activation_fn=tf.nn.relu,\n trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n out = layers.max_pool2d(out, kernel_size=(2, 2))\n out = layers.flatten(out)\n out = layers.fully_connected(out, num_outputs=1000,\n activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob,\n is_training=trainable)\n logits = layers.fully_connected(out, self._num_labels,\n trainable=trainable)\n return logits\n\n def _build_model(self):\n self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_')\n x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x')\n self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels],\n name='y')\n self.lr = tf.placeholder(tf.float32, shape=(), name='lr')\n self.logits = self._build_net(x)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self\n .logits, labels=self.y)\n self.loss = tf.reduce_mean(cross_entropy)\n optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9,\n use_nesterov=True)\n self.train_op = optimizer.minimize(loss=self.loss)\n self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1\n ), tf.argmax(self.y, 1)), dtype=tf.float32))\n self.val_logits = self._build_net(x, reuse=True, trainable=False)\n self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.\n val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('accuracy', self.acc)\n self.merged = tf.summary.merge_all()\n",
"step-5": "import tensorflow as tf\nimport numpy as np\nimport tensorflow.contrib.layers as layers\n\nclass Model(object):\n def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10, keep_prob=0.5, scope=\"model\"):\n self._batch_size = batch_size\n self._learning_rate = learning_rate\n self._num_labels = num_labels\n self._scope = scope\n self._keep_prob = keep_prob\n self._conv_hidden_dims = [192, 192]\n with tf.variable_scope(self._scope):\n self._build_model()\n\n def _build_net(self, x, reuse=False, trainable=True, scope=\"inference_net\"):\n with tf.variable_scope(scope, reuse=reuse):\n out = x\n for i in range(len(self._conv_hidden_dims)):\n out = layers.conv2d(out, num_outputs=self._conv_hidden_dims[i], kernel_size=(5, 5),\n activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)\n out = layers.max_pool2d(out, kernel_size=(2, 2))\n\n out = layers.flatten(out)\n out = layers.fully_connected(out, num_outputs=1000, activation_fn=tf.nn.relu, trainable=trainable)\n out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)\n logits = layers.fully_connected(out, self._num_labels, trainable=trainable)\n\n return logits\n\n def _build_model(self):\n self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_') # data gets loaded as a 32x32 vector\n x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x') # CIFAR dataset is shape 32,32,3\n self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels], name='y') # 10 labels\n # self.keep_prob = tf.placeholder(tf.float32, name='dropout_prob')\n self.lr = tf.placeholder(tf.float32, shape=(), name='lr')\n\n self.logits = self._build_net(x)\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)\n self.loss = tf.reduce_mean(cross_entropy)\n optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9, use_nesterov=True)\n self.train_op = optimizer.minimize(loss=self.loss)\n self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))\n\n # for eval steps\n self.val_logits = self._build_net(x, reuse=True, trainable=False)\n self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.val_logits, 1), tf.argmax(self.y, 1)), dtype=tf.float32))\n\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('accuracy', self.acc)\n self.merged = tf.summary.merge_all()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
T = int(input())
for i in range(T):
start, end = map(int, input().split())
between = end - start
flag = 0
num = 1
while between > 0:
if flag % 2 == 1:
between -= num
num += 1
flag += 1
else:
between -= num
flag += 1
print(flag)
|
normal
|
{
"blob_id": "a96761fc483c0883b058c2b045b038522c23d426",
"index": 3441,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(T):\n start, end = map(int, input().split())\n between = end - start\n flag = 0\n num = 1\n while between > 0:\n if flag % 2 == 1:\n between -= num\n num += 1\n flag += 1\n else:\n between -= num\n flag += 1\n print(flag)\n",
"step-3": "T = int(input())\nfor i in range(T):\n start, end = map(int, input().split())\n between = end - start\n flag = 0\n num = 1\n while between > 0:\n if flag % 2 == 1:\n between -= num\n num += 1\n flag += 1\n else:\n between -= num\n flag += 1\n print(flag)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import math
import random
from PIL import Image, ImageDraw

# Draw 20 random circles on a 512x512 white image, remembering the centre and
# radius of each one.
im = Image.new("RGB", (512, 512), "white")
draw = ImageDraw.Draw(im)
kukloi = []  # (x, y, r) of every circle that gets drawn
for i in range(20):
    x = random.randint(0, 511)
    y = random.randint(0, 511)
    r = random.randint(10, 50)
    draw.ellipse((x - r, y - r, x + r, y + r), fill=(255, 255, 0), outline='red')
    kukloi.append((x, y, r))

# Compare every circle with the remaining ones, without repeating checks.
temkuk = 0  # number of intersecting pairs of circles
for i in range(len(kukloi)):
    for k in range(i + 1, len(kukloi)):
        a = math.pow(kukloi[k][1] - kukloi[i][1], 2)
        b = math.pow(kukloi[k][0] - kukloi[i][0], 2)
        d = math.sqrt(a + b)
        if math.fabs(kukloi[i][2] - kukloi[k][2]) < d < kukloi[i][2] + kukloi[k][2]:
            temkuk = temkuk + 1
print("temnontai", temkuk, "kukloi")  # how many pairs of circles intersect
im.show()  # and show the image
|
normal
|
{
"blob_id": "a2d2ffe5ed6a844341f7ad731357bb837cee4787",
"index": 6193,
"step-1": "import math\r\nimport random\r\nfrom PILL import Image, ImageDraw\r\nfor i in range(1,1025):\r\n pass\r\n for j in range(1,1025):\r\n pass\r\n epipedo[i][j]\r\nfor i in range(1,21):\r\n pass\r\n im = Image.new(\"RGB\", (512, 512), \"white\")\r\n x=random.choice(1,1025)\r\n y=random.choice(1,1025)\r\n r=random.choice(10,51)\r\n draw = ImageDraw.Draw(im)\r\n draw.ellipse((x-r, y-r, x+r, y+r), fill=(255,255,0), outline ='red')\r\n for j in range(1,4):#apothikeuw ta stoixeia tou kathe kuklou(kentro kai aktina)\r\n pass\r\n if j==1:\r\n pass\r\n kukloi[i][1]=x\r\n if j==2:\r\n pass\r\n kukloi[i][2]=y\r\n if j==3:\r\n pass\r\n kukloi[i][3]=r\r\nfor i in range(1,21):\r\n pass\r\n for k in range(i,20):#sugkrinw kathe kuklo me tous upoloipous xwris na epanalambanontai oi idioi elegxoi\r\n pass\r\n a=math.pow(kukloi[k+1][2]-kukloi[i][2], 2)\r\n b=math.pow(kukloi[k+1][1]-kukloi[i][1], 2)\r\n d=math.sqrt(a+b)\r\n if math.fabs(kukloi[i][3]-kykloi[k+1][3])<d and d<kukloi[i][3]+kykloi[k+1][3]:\r\n pass\r\n temkuk=0#oi temonomenoi kukloi\r\n temkuk=temkuk+1\r\nprint \"temnontai\",temkuk, \"kukloi\"# emfanizei tous temonomenous kuklous\r\nim.show()#kai tin eikona\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Sala:
def __init__(self, sala):
self.Turmas = []
self.numero = sala
def add_turma(self, turma):
# do things
self.Turmas.append(turma)
def __str__(self):
return str(self.numero)
|
normal
|
{
"blob_id": "e41df44db92e2ef7f9c20a0f3052e1c8c28b76c7",
"index": 6174,
"step-1": "class Sala:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Sala:\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self.numero)\n",
"step-3": "class Sala:\n <mask token>\n\n def add_turma(self, turma):\n self.Turmas.append(turma)\n\n def __str__(self):\n return str(self.numero)\n",
"step-4": "class Sala:\n\n def __init__(self, sala):\n self.Turmas = []\n self.numero = sala\n\n def add_turma(self, turma):\n self.Turmas.append(turma)\n\n def __str__(self):\n return str(self.numero)\n",
"step-5": "class Sala:\n def __init__(self, sala):\n self.Turmas = []\n self.numero = sala\n\n def add_turma(self, turma):\n # do things\n self.Turmas.append(turma)\n\n def __str__(self):\n return str(self.numero)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FitnerappConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FitnerappConfig(AppConfig):
name = 'fitnerapp'
<|reserved_special_token_1|>
from django.apps import AppConfig
class FitnerappConfig(AppConfig):
name = 'fitnerapp'
|
flexible
|
{
"blob_id": "6546d04d3755d62d1a8756bdec1a10f6f018dcea",
"index": 5638,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FitnerappConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FitnerappConfig(AppConfig):\n name = 'fitnerapp'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass FitnerappConfig(AppConfig):\n name = 'fitnerapp'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mutual_info(parent, child):
parent = [int(x) for x in parent]
child = [int(x) for x in child]
return mutual_info_score(parent, child)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mimic_binary(max_iter=100, fitness_func=None, space=None):
assert fitness_func is not None
assert space is not None
idx = np.random.permutation(np.arange(len(space)))
pool = space[idx[:int(len(space) / 2)]]
new_pool = []
for i in range(max_iter):
print('mimic: {}|{}'.format(i + 1, max_iter))
theta += delta
for j, parent in enumerate(pool):
if j in new_pool or fitness_func(parent) < theta:
continue
best_score = 0
best_child = parent
for k, child in enumerate(pool):
if k <= j or child in new_pool:
continue
score = mutual_info(parent, child)
if score > best_score and fitness_func(child) >= theta:
best_score = score
new_pool.append(parent)
new_pool.append(child)
return None
def mutual_info(parent, child):
parent = [int(x) for x in parent]
child = [int(x) for x in child]
return mutual_info_score(parent, child)
<|reserved_special_token_1|>
import numpy as np
from sklearn.metrics import mutual_info_score
def mimic_binary(max_iter=100, fitness_func=None, space=None):
assert fitness_func is not None
assert space is not None
idx = np.random.permutation(np.arange(len(space)))
pool = space[idx[:int(len(space) / 2)]]
new_pool = []
for i in range(max_iter):
print('mimic: {}|{}'.format(i + 1, max_iter))
theta += delta
for j, parent in enumerate(pool):
if j in new_pool or fitness_func(parent) < theta:
continue
best_score = 0
best_child = parent
for k, child in enumerate(pool):
if k <= j or child in new_pool:
continue
score = mutual_info(parent, child)
if score > best_score and fitness_func(child) >= theta:
best_score = score
new_pool.append(parent)
new_pool.append(child)
return None
def mutual_info(parent, child):
parent = [int(x) for x in parent]
child = [int(x) for x in child]
return mutual_info_score(parent, child)
<|reserved_special_token_1|>
import numpy as np
from sklearn.metrics import mutual_info_score
def mimic_binary(max_iter=100, fitness_func=None, space=None):
assert fitness_func is not None
assert space is not None
idx = np.random.permutation(np.arange(len(space)))
pool = space[idx[:int(len(space)/2)]] # randomly sample 50% of the oringal space
new_pool = []
for i in range(max_iter):
print("mimic: {}|{}".format(i+1, max_iter))
theta += delta
for j, parent in enumerate(pool):
if j in new_pool or fitness_func(parent)<theta: continue
best_score = 0
best_child = parent
for k, child in enumerate(pool):
if k<=j or child in new_pool: continue
score = mutual_info(parent, child)
if score > best_score and fitness_func(child)>=theta:
best_score = score
new_pool.append(parent)
new_pool.append(child)
return None
def mutual_info(parent, child):
parent = [int(x) for x in parent]
child = [int(x) for x in child]
return mutual_info_score(parent,child)
|
flexible
|
{
"blob_id": "360e661d8538a8f40b7546a54e9a9582fa64bd67",
"index": 700,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef mutual_info(parent, child):\n parent = [int(x) for x in parent]\n child = [int(x) for x in child]\n return mutual_info_score(parent, child)\n",
"step-3": "<mask token>\n\n\ndef mimic_binary(max_iter=100, fitness_func=None, space=None):\n assert fitness_func is not None\n assert space is not None\n idx = np.random.permutation(np.arange(len(space)))\n pool = space[idx[:int(len(space) / 2)]]\n new_pool = []\n for i in range(max_iter):\n print('mimic: {}|{}'.format(i + 1, max_iter))\n theta += delta\n for j, parent in enumerate(pool):\n if j in new_pool or fitness_func(parent) < theta:\n continue\n best_score = 0\n best_child = parent\n for k, child in enumerate(pool):\n if k <= j or child in new_pool:\n continue\n score = mutual_info(parent, child)\n if score > best_score and fitness_func(child) >= theta:\n best_score = score\n new_pool.append(parent)\n new_pool.append(child)\n return None\n\n\ndef mutual_info(parent, child):\n parent = [int(x) for x in parent]\n child = [int(x) for x in child]\n return mutual_info_score(parent, child)\n",
"step-4": "import numpy as np\nfrom sklearn.metrics import mutual_info_score\n\n\ndef mimic_binary(max_iter=100, fitness_func=None, space=None):\n assert fitness_func is not None\n assert space is not None\n idx = np.random.permutation(np.arange(len(space)))\n pool = space[idx[:int(len(space) / 2)]]\n new_pool = []\n for i in range(max_iter):\n print('mimic: {}|{}'.format(i + 1, max_iter))\n theta += delta\n for j, parent in enumerate(pool):\n if j in new_pool or fitness_func(parent) < theta:\n continue\n best_score = 0\n best_child = parent\n for k, child in enumerate(pool):\n if k <= j or child in new_pool:\n continue\n score = mutual_info(parent, child)\n if score > best_score and fitness_func(child) >= theta:\n best_score = score\n new_pool.append(parent)\n new_pool.append(child)\n return None\n\n\ndef mutual_info(parent, child):\n parent = [int(x) for x in parent]\n child = [int(x) for x in child]\n return mutual_info_score(parent, child)\n",
"step-5": "import numpy as np\nfrom sklearn.metrics import mutual_info_score\n\ndef mimic_binary(max_iter=100, fitness_func=None, space=None):\n\n assert fitness_func is not None\n assert space is not None\n\n idx = np.random.permutation(np.arange(len(space)))\n pool = space[idx[:int(len(space)/2)]] # randomly sample 50% of the oringal space\n\n new_pool = []\n\n for i in range(max_iter):\n print(\"mimic: {}|{}\".format(i+1, max_iter))\n theta += delta\n for j, parent in enumerate(pool):\n if j in new_pool or fitness_func(parent)<theta: continue\n best_score = 0\n best_child = parent\n for k, child in enumerate(pool):\n if k<=j or child in new_pool: continue\n score = mutual_info(parent, child)\n if score > best_score and fitness_func(child)>=theta:\n best_score = score\n new_pool.append(parent)\n new_pool.append(child)\n return None\n\ndef mutual_info(parent, child):\n parent = [int(x) for x in parent]\n child = [int(x) for x in child]\n return mutual_info_score(parent,child)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class DataSet:
def __init__(self, training_folder):
self.training_folder = training_folder
print('load Data')
<|reserved_special_token_0|>
def readFiles(self, queue, file_list, start, end):
print('start-read-file')
print('start ', start)
print('end ', end)
print('file_list ', str(len(file_list)))
load = []
for filename in file_list[start:end]:
load += self.loadMelAndStft(self.training_folder + filename)
print('Path: ' + filename)
queue.put(load)
print('finished')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataSet:
def __init__(self, training_folder):
self.training_folder = training_folder
print('load Data')
<|reserved_special_token_0|>
def readFiles(self, queue, file_list, start, end):
print('start-read-file')
print('start ', start)
print('end ', end)
print('file_list ', str(len(file_list)))
load = []
for filename in file_list[start:end]:
load += self.loadMelAndStft(self.training_folder + filename)
print('Path: ' + filename)
queue.put(load)
print('finished')
def main(self):
queue = mp.Queue()
file_list = os.listdir(self.training_folder)
time_before = time.time()
processes = []
file_batch_size = 50
steps = int(len(file_list) / file_batch_size) + 1
print(steps)
for file_batch in range(steps):
print('run', file_batch)
start_read = file_batch * file_batch_size
end_read = file_batch * file_batch_size + file_batch_size
if len(file_list) < end_read:
end_read = len(file_list)
process = mp.Process(target=self.readFiles, args=(queue,
file_list, start_read, end_read))
processes.append(process)
for process in processes:
print('start process')
process.start()
returns = []
for process in processes:
ret = queue.get()
returns += ret
for process in processes:
process.join()
process.join()
print(len(returns))
print('time difference: ', str(time.time() - time_before))
return returns
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataSet:
def __init__(self, training_folder):
self.training_folder = training_folder
print('load Data')
def loadMelAndStft(self, filename):
wav, sr = librosa.load(filename)
stft_in = librosa.stft(wav)
mel_in = librosa.feature.melspectrogram(S=stft_in)
stft_in = np.array(stft_in)
mel_in = np.array(mel_in)
mel_in = np.swapaxes(mel_in, 0, 1)
stft_in = np.swapaxes(stft_in, 0, 1)
mel_and_stft = []
input_overlap_per_side = 1
for element in range(mel_in.shape[0]):
if element > input_overlap_per_side and element < mel_in.shape[0
] - input_overlap_per_side:
mel_in_with_overlap = []
for number in range(input_overlap_per_side * 2 + 1):
actual_mel_index = (element - input_overlap_per_side +
number)
mel_in_with_overlap.append(mel_in[actual_mel_index])
mel_in_with_overlap = np.asarray(mel_in_with_overlap, dtype
=np.float32).flatten()
stft_in = np.asarray(stft_in, dtype=np.float32)
mel_and_stft.append([mel_in_with_overlap, stft_in[element]])
return mel_and_stft
def readFiles(self, queue, file_list, start, end):
print('start-read-file')
print('start ', start)
print('end ', end)
print('file_list ', str(len(file_list)))
load = []
for filename in file_list[start:end]:
load += self.loadMelAndStft(self.training_folder + filename)
print('Path: ' + filename)
queue.put(load)
print('finished')
def main(self):
queue = mp.Queue()
file_list = os.listdir(self.training_folder)
time_before = time.time()
processes = []
file_batch_size = 50
steps = int(len(file_list) / file_batch_size) + 1
print(steps)
for file_batch in range(steps):
print('run', file_batch)
start_read = file_batch * file_batch_size
end_read = file_batch * file_batch_size + file_batch_size
if len(file_list) < end_read:
end_read = len(file_list)
process = mp.Process(target=self.readFiles, args=(queue,
file_list, start_read, end_read))
processes.append(process)
for process in processes:
print('start process')
process.start()
returns = []
for process in processes:
ret = queue.get()
returns += ret
for process in processes:
process.join()
process.join()
print(len(returns))
print('time difference: ', str(time.time() - time_before))
return returns
<|reserved_special_token_1|>
import librosa
import librosa.display
import matplotlib.pyplot as plt
import os
import numpy as np
import time
import multiprocessing as mp
from tempfile import TemporaryFile
class DataSet:
def __init__(self, training_folder):
self.training_folder = training_folder
print('load Data')
def loadMelAndStft(self, filename):
wav, sr = librosa.load(filename)
stft_in = librosa.stft(wav)
mel_in = librosa.feature.melspectrogram(S=stft_in)
stft_in = np.array(stft_in)
mel_in = np.array(mel_in)
mel_in = np.swapaxes(mel_in, 0, 1)
stft_in = np.swapaxes(stft_in, 0, 1)
mel_and_stft = []
input_overlap_per_side = 1
for element in range(mel_in.shape[0]):
if element > input_overlap_per_side and element < mel_in.shape[0
] - input_overlap_per_side:
mel_in_with_overlap = []
for number in range(input_overlap_per_side * 2 + 1):
actual_mel_index = (element - input_overlap_per_side +
number)
mel_in_with_overlap.append(mel_in[actual_mel_index])
mel_in_with_overlap = np.asarray(mel_in_with_overlap, dtype
=np.float32).flatten()
stft_in = np.asarray(stft_in, dtype=np.float32)
mel_and_stft.append([mel_in_with_overlap, stft_in[element]])
return mel_and_stft
def readFiles(self, queue, file_list, start, end):
print('start-read-file')
print('start ', start)
print('end ', end)
print('file_list ', str(len(file_list)))
load = []
for filename in file_list[start:end]:
load += self.loadMelAndStft(self.training_folder + filename)
print('Path: ' + filename)
queue.put(load)
print('finished')
def main(self):
queue = mp.Queue()
file_list = os.listdir(self.training_folder)
time_before = time.time()
processes = []
file_batch_size = 50
steps = int(len(file_list) / file_batch_size) + 1
print(steps)
for file_batch in range(steps):
print('run', file_batch)
start_read = file_batch * file_batch_size
end_read = file_batch * file_batch_size + file_batch_size
if len(file_list) < end_read:
end_read = len(file_list)
process = mp.Process(target=self.readFiles, args=(queue,
file_list, start_read, end_read))
processes.append(process)
for process in processes:
print('start process')
process.start()
returns = []
for process in processes:
ret = queue.get()
returns += ret
for process in processes:
process.join()
process.join()
print(len(returns))
print('time difference: ', str(time.time() - time_before))
return returns
<|reserved_special_token_1|>
import librosa
import librosa.display
import matplotlib.pyplot as plt
import os
import numpy as np
import time
import multiprocessing as mp
from tempfile import TemporaryFile
class DataSet():
def __init__(self,training_folder):
self.training_folder = training_folder
print("load Data")
def loadMelAndStft(self,filename):
wav, sr = librosa.load(filename)
stft_in = librosa.stft(wav)
mel_in = librosa.feature.melspectrogram(S=stft_in)
stft_in = np.array(stft_in)
mel_in = np.array(mel_in)
mel_in = np.swapaxes(mel_in, 0, 1)
stft_in = np.swapaxes(stft_in, 0, 1)
mel_and_stft = []
input_overlap_per_side = 1
for element in range(mel_in.shape[0]):
if(element > input_overlap_per_side and element < mel_in.shape[0]-input_overlap_per_side):
mel_in_with_overlap = []
for number in range(input_overlap_per_side*2+1):
actual_mel_index = element - input_overlap_per_side + number
mel_in_with_overlap.append(mel_in[actual_mel_index])
mel_in_with_overlap = np.asarray(mel_in_with_overlap, dtype=np.float32).flatten()
stft_in =np.asarray(stft_in, dtype=np.float32)
mel_and_stft.append([mel_in_with_overlap,stft_in[element]])
return mel_and_stft
def readFiles(self,queue,file_list,start,end):
print("start-read-file")
print("start ",start)
print("end ",end)
print("file_list ",str(len(file_list)))
load = []
for filename in file_list[start:end]:
load += self.loadMelAndStft(self.training_folder+filename)
print("Path: " + filename)
queue.put(load)
print("finished")
def main(self):
queue = mp.Queue()
file_list = os.listdir(self.training_folder)
time_before = time.time()
processes = []
file_batch_size = 50
steps= int(len(file_list)/file_batch_size)+1
print(steps)
for file_batch in range(steps):
print("run",file_batch)
start_read = file_batch*file_batch_size
end_read = file_batch*file_batch_size+file_batch_size
if len(file_list) < end_read:
end_read = len(file_list)
process = mp.Process(target=self.readFiles, args=(queue,file_list,start_read,end_read))
processes.append(process)
for process in processes:
print("start process")
process.start()
returns = []
for process in processes:
ret = queue.get() # will block
returns += ret
for process in processes:
process.join()
process.join()
print(len(returns))
print("time difference: ", str(time.time()-time_before))
return returns
|
flexible
|
{
"blob_id": "ba09dbe3fbca51ece8a7d482324a2dec32e7dc8a",
"index": 5016,
"step-1": "<mask token>\n\n\nclass DataSet:\n\n def __init__(self, training_folder):\n self.training_folder = training_folder\n print('load Data')\n <mask token>\n\n def readFiles(self, queue, file_list, start, end):\n print('start-read-file')\n print('start ', start)\n print('end ', end)\n print('file_list ', str(len(file_list)))\n load = []\n for filename in file_list[start:end]:\n load += self.loadMelAndStft(self.training_folder + filename)\n print('Path: ' + filename)\n queue.put(load)\n print('finished')\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DataSet:\n\n def __init__(self, training_folder):\n self.training_folder = training_folder\n print('load Data')\n <mask token>\n\n def readFiles(self, queue, file_list, start, end):\n print('start-read-file')\n print('start ', start)\n print('end ', end)\n print('file_list ', str(len(file_list)))\n load = []\n for filename in file_list[start:end]:\n load += self.loadMelAndStft(self.training_folder + filename)\n print('Path: ' + filename)\n queue.put(load)\n print('finished')\n\n def main(self):\n queue = mp.Queue()\n file_list = os.listdir(self.training_folder)\n time_before = time.time()\n processes = []\n file_batch_size = 50\n steps = int(len(file_list) / file_batch_size) + 1\n print(steps)\n for file_batch in range(steps):\n print('run', file_batch)\n start_read = file_batch * file_batch_size\n end_read = file_batch * file_batch_size + file_batch_size\n if len(file_list) < end_read:\n end_read = len(file_list)\n process = mp.Process(target=self.readFiles, args=(queue,\n file_list, start_read, end_read))\n processes.append(process)\n for process in processes:\n print('start process')\n process.start()\n returns = []\n for process in processes:\n ret = queue.get()\n returns += ret\n for process in processes:\n process.join()\n process.join()\n print(len(returns))\n print('time difference: ', str(time.time() - time_before))\n return returns\n",
"step-3": "<mask token>\n\n\nclass DataSet:\n\n def __init__(self, training_folder):\n self.training_folder = training_folder\n print('load Data')\n\n def loadMelAndStft(self, filename):\n wav, sr = librosa.load(filename)\n stft_in = librosa.stft(wav)\n mel_in = librosa.feature.melspectrogram(S=stft_in)\n stft_in = np.array(stft_in)\n mel_in = np.array(mel_in)\n mel_in = np.swapaxes(mel_in, 0, 1)\n stft_in = np.swapaxes(stft_in, 0, 1)\n mel_and_stft = []\n input_overlap_per_side = 1\n for element in range(mel_in.shape[0]):\n if element > input_overlap_per_side and element < mel_in.shape[0\n ] - input_overlap_per_side:\n mel_in_with_overlap = []\n for number in range(input_overlap_per_side * 2 + 1):\n actual_mel_index = (element - input_overlap_per_side +\n number)\n mel_in_with_overlap.append(mel_in[actual_mel_index])\n mel_in_with_overlap = np.asarray(mel_in_with_overlap, dtype\n =np.float32).flatten()\n stft_in = np.asarray(stft_in, dtype=np.float32)\n mel_and_stft.append([mel_in_with_overlap, stft_in[element]])\n return mel_and_stft\n\n def readFiles(self, queue, file_list, start, end):\n print('start-read-file')\n print('start ', start)\n print('end ', end)\n print('file_list ', str(len(file_list)))\n load = []\n for filename in file_list[start:end]:\n load += self.loadMelAndStft(self.training_folder + filename)\n print('Path: ' + filename)\n queue.put(load)\n print('finished')\n\n def main(self):\n queue = mp.Queue()\n file_list = os.listdir(self.training_folder)\n time_before = time.time()\n processes = []\n file_batch_size = 50\n steps = int(len(file_list) / file_batch_size) + 1\n print(steps)\n for file_batch in range(steps):\n print('run', file_batch)\n start_read = file_batch * file_batch_size\n end_read = file_batch * file_batch_size + file_batch_size\n if len(file_list) < end_read:\n end_read = len(file_list)\n process = mp.Process(target=self.readFiles, args=(queue,\n file_list, start_read, end_read))\n processes.append(process)\n for process in processes:\n print('start process')\n process.start()\n returns = []\n for process in processes:\n ret = queue.get()\n returns += ret\n for process in processes:\n process.join()\n process.join()\n print(len(returns))\n print('time difference: ', str(time.time() - time_before))\n return returns\n",
"step-4": "import librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport time\nimport multiprocessing as mp\nfrom tempfile import TemporaryFile\n\n\nclass DataSet:\n\n def __init__(self, training_folder):\n self.training_folder = training_folder\n print('load Data')\n\n def loadMelAndStft(self, filename):\n wav, sr = librosa.load(filename)\n stft_in = librosa.stft(wav)\n mel_in = librosa.feature.melspectrogram(S=stft_in)\n stft_in = np.array(stft_in)\n mel_in = np.array(mel_in)\n mel_in = np.swapaxes(mel_in, 0, 1)\n stft_in = np.swapaxes(stft_in, 0, 1)\n mel_and_stft = []\n input_overlap_per_side = 1\n for element in range(mel_in.shape[0]):\n if element > input_overlap_per_side and element < mel_in.shape[0\n ] - input_overlap_per_side:\n mel_in_with_overlap = []\n for number in range(input_overlap_per_side * 2 + 1):\n actual_mel_index = (element - input_overlap_per_side +\n number)\n mel_in_with_overlap.append(mel_in[actual_mel_index])\n mel_in_with_overlap = np.asarray(mel_in_with_overlap, dtype\n =np.float32).flatten()\n stft_in = np.asarray(stft_in, dtype=np.float32)\n mel_and_stft.append([mel_in_with_overlap, stft_in[element]])\n return mel_and_stft\n\n def readFiles(self, queue, file_list, start, end):\n print('start-read-file')\n print('start ', start)\n print('end ', end)\n print('file_list ', str(len(file_list)))\n load = []\n for filename in file_list[start:end]:\n load += self.loadMelAndStft(self.training_folder + filename)\n print('Path: ' + filename)\n queue.put(load)\n print('finished')\n\n def main(self):\n queue = mp.Queue()\n file_list = os.listdir(self.training_folder)\n time_before = time.time()\n processes = []\n file_batch_size = 50\n steps = int(len(file_list) / file_batch_size) + 1\n print(steps)\n for file_batch in range(steps):\n print('run', file_batch)\n start_read = file_batch * file_batch_size\n end_read = file_batch * file_batch_size + file_batch_size\n if len(file_list) < end_read:\n end_read = len(file_list)\n process = mp.Process(target=self.readFiles, args=(queue,\n file_list, start_read, end_read))\n processes.append(process)\n for process in processes:\n print('start process')\n process.start()\n returns = []\n for process in processes:\n ret = queue.get()\n returns += ret\n for process in processes:\n process.join()\n process.join()\n print(len(returns))\n print('time difference: ', str(time.time() - time_before))\n return returns\n",
"step-5": "import librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport time\nimport multiprocessing as mp\nfrom tempfile import TemporaryFile\n\nclass DataSet():\n def __init__(self,training_folder):\n self.training_folder = training_folder\n print(\"load Data\")\n\n def loadMelAndStft(self,filename):\n wav, sr = librosa.load(filename)\n stft_in = librosa.stft(wav)\n mel_in = librosa.feature.melspectrogram(S=stft_in)\n stft_in = np.array(stft_in)\n mel_in = np.array(mel_in)\n\n mel_in = np.swapaxes(mel_in, 0, 1)\n stft_in = np.swapaxes(stft_in, 0, 1)\n\n mel_and_stft = []\n input_overlap_per_side = 1\n for element in range(mel_in.shape[0]):\n if(element > input_overlap_per_side and element < mel_in.shape[0]-input_overlap_per_side):\n mel_in_with_overlap = []\n for number in range(input_overlap_per_side*2+1):\n actual_mel_index = element - input_overlap_per_side + number\n mel_in_with_overlap.append(mel_in[actual_mel_index])\n mel_in_with_overlap = np.asarray(mel_in_with_overlap, dtype=np.float32).flatten()\n stft_in =np.asarray(stft_in, dtype=np.float32)\n mel_and_stft.append([mel_in_with_overlap,stft_in[element]])\n\n return mel_and_stft\n\n def readFiles(self,queue,file_list,start,end):\n print(\"start-read-file\")\n print(\"start \",start)\n print(\"end \",end)\n print(\"file_list \",str(len(file_list)))\n load = []\n for filename in file_list[start:end]:\n load += self.loadMelAndStft(self.training_folder+filename)\n print(\"Path: \" + filename)\n queue.put(load)\n print(\"finished\")\n\n def main(self):\n queue = mp.Queue()\n file_list = os.listdir(self.training_folder)\n\n time_before = time.time()\n processes = []\n file_batch_size = 50\n steps= int(len(file_list)/file_batch_size)+1\n print(steps)\n for file_batch in range(steps):\n print(\"run\",file_batch)\n start_read = file_batch*file_batch_size\n\n end_read = file_batch*file_batch_size+file_batch_size\n if len(file_list) < end_read:\n end_read = len(file_list)\n\n process = mp.Process(target=self.readFiles, args=(queue,file_list,start_read,end_read))\n processes.append(process)\n\n for process in processes:\n print(\"start process\")\n process.start()\n returns = []\n for process in processes:\n ret = queue.get() # will block\n returns += ret\n for process in processes:\n process.join()\n process.join()\n print(len(returns))\n print(\"time difference: \", str(time.time()-time_before))\n return returns\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
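A minimal usage sketch for the DataSet loader in the record above, assuming a folder of audio files and simple unpacking of the returned pairs; the path, the __main__ guard placement, and the variable names are illustrative, not part of the original sample.

# Hypothetical driver for the DataSet class above (paths and unpacking are assumptions).
import numpy as np

if __name__ == '__main__':                  # needed because DataSet.main() spawns worker processes
    dataset = DataSet('training_wavs/')     # assumed folder of audio files readable by librosa
    pairs = dataset.main()                  # list of [mel_with_overlap, stft_frame] pairs
    mel_inputs = np.array([p[0] for p in pairs])
    stft_targets = np.array([p[1] for p in pairs])
    print(mel_inputs.shape, stft_targets.shape)
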
txt = './KF_neko.txt.mecab'
mapData = {}
listData = []
with open('./KF31.txt', 'w') as writeFile:
with open(txt, 'r') as readFile:
for text in readFile:
# print(text)
            # split on \t and look only at the first element
listData = text.split('\t')
            # surface form
surface = listData[0]
            # blank it out if it is EOS
if surface == 'EOS\n':
surface = ''
# print(surface)
            # split apart everything other than the surface form
splitted = listData[-1].split(',')
            # skip it if it is EOS
if splitted == 'EOS\n':
continue
else:
                # part of speech
pos = splitted[0]
if pos in ('動詞'):
dousiSurface = surface
writeFile.write(dousiSurface+'\n')
|
normal
|
{
"blob_id": "778ee9a0ea7f57535b4de88a38cd741f2d46e092",
"index": 6966,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('./KF31.txt', 'w') as writeFile:\n with open(txt, 'r') as readFile:\n for text in readFile:\n listData = text.split('\\t')\n surface = listData[0]\n if surface == 'EOS\\n':\n surface = ''\n splitted = listData[-1].split(',')\n if splitted == 'EOS\\n':\n continue\n else:\n pos = splitted[0]\n if pos in '動詞':\n dousiSurface = surface\n writeFile.write(dousiSurface + '\\n')\n",
"step-3": "txt = './KF_neko.txt.mecab'\nmapData = {}\nlistData = []\nwith open('./KF31.txt', 'w') as writeFile:\n with open(txt, 'r') as readFile:\n for text in readFile:\n listData = text.split('\\t')\n surface = listData[0]\n if surface == 'EOS\\n':\n surface = ''\n splitted = listData[-1].split(',')\n if splitted == 'EOS\\n':\n continue\n else:\n pos = splitted[0]\n if pos in '動詞':\n dousiSurface = surface\n writeFile.write(dousiSurface + '\\n')\n",
"step-4": "txt = './KF_neko.txt.mecab'\nmapData = {}\nlistData = []\nwith open('./KF31.txt', 'w') as writeFile:\n with open(txt, 'r') as readFile:\n for text in readFile:\n # print(text)\n # \\tで区切って先頭だけ見る\n listData = text.split('\\t')\n # 表層形\n surface = listData[0]\n # EOSが入ってたら消す\n if surface == 'EOS\\n':\n surface = ''\n # print(surface)\n # 表層形以外をバラす\n splitted = listData[-1].split(',')\n # EOSが入ってたら消す\n if splitted == 'EOS\\n':\n continue\n else:\n # 品詞\n pos = splitted[0]\n if pos in ('動詞'):\n dousiSurface = surface\n writeFile.write(dousiSurface+'\\n')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
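The record above splits each MeCab line on a tab for the surface form and then splits the feature string on commas to read the part of speech, keeping only verbs. A one-line sketch of that parsing step; the sample word and feature values are assumptions about typical IPAdic-style MeCab output, not data from the original file.

# One hypothetical MeCab output line: surface form, a tab, then comma-separated features.
line = '走る\t動詞,自立,*,*,五段・ラ行,基本形,走る,ハシル,ハシル\n'
fields = line.split('\t')           # ['走る', '動詞,自立,...']
surface = fields[0]                 # surface form
features = fields[-1].split(',')
pos = features[0]                   # part of speech; '動詞' means verb
print(surface, pos)
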
'''
Unit test for `redi.create_summary_report()`
'''
import unittest
import os
import sys
from lxml import etree
from StringIO import StringIO
import time
import redi
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir)+'/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestCreateSummaryReport(unittest.TestCase):
def setUp(self):
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_report_params = {
'project': 'hcvtarget-uf',
'report_file_path': proj_root + 'config/report.xml',
'redcap_uri': 'https://hostname.org'}
self.test_report_data = {
'total_subjects': 5,
'form_details': {
'Total_chemistry_Forms': 22,
'Total_cbc_Forms': 53
},
'subject_details': {
'60': {'cbc_Forms': 1, 'chemistry_Forms': 1},
'61': {'cbc_Forms': 2, 'chemistry_Forms': 1},
'63': {'cbc_Forms': 11, 'chemistry_Forms': 4},
'59': {'cbc_Forms': 39, 'chemistry_Forms': 16}
},
'errors' : [],
}
self.specimen_taken_time_summary = {'total': 15, 'blank': 3}
self.test_alert_summary = {
'multiple_values_alert': [
'This is multiple values alert 1',
'This is multiple values alert 2',
'This is multiple values alert 3'],
'max_event_alert': [
'This is max event alert 1',
'This is max event alert 2',
'This is max event alert 3']
}
self.expected_xml = '''
<report>
<header>
<project>hcvtarget-uf</project>
<date>'''+time.strftime("%m/%d/%Y")+'''</date>
<redcapServerAddress>https://hostname.org</redcapServerAddress>
</header>
<summary>
<subjectCount>5</subjectCount>
<forms>
<form>
<form_name>Total_cbc_Forms</form_name>
<form_count>53</form_count>
</form>
<form>
<form_name>Total_chemistry_Forms</form_name>
<form_count>22</form_count>
</form>
</forms>
</summary>
<alerts>
<tooManyForms>
<eventAlert>
<message>This is max event alert 1</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 2</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 3</message>
</eventAlert>
</tooManyForms>
<tooManyValues>
<valuesAlert>
<message>This is multiple values alert 1</message>
</valuesAlert>
<valuesAlert>
<message>This is multiple values alert 2</message>
</valuesAlert>
<valuesAlert><message>This is multiple values alert 3</message>
</valuesAlert></tooManyValues>
</alerts>
<subjectsDetails>
<Subject><ID>59</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>39</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>16</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>60</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>1</form_count></form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject><ID>61</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>2</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>63</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>11</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>4</form_count>
</form>
</forms>
</Subject>
</subjectsDetails>
<errors/>
<summaryOfSpecimenTakenTimes>
<total>15</total>
<blank>3</blank>
<percent>20.0</percent>
</summaryOfSpecimenTakenTimes>
</report>'''
self.schema_str = StringIO('''\
<xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="report">
<xs:complexType>
<xs:sequence>
<xs:element name="header">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="project"/>
<xs:element type="xs:string" name="date"/>
<xs:element type="xs:string" name="redcapServerAddress"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="summary">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="subjectCount"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="alerts">
<xs:complexType>
<xs:sequence>
<xs:element name="tooManyForms">
<xs:complexType>
<xs:sequence>
<xs:element name="eventAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="tooManyValues">
<xs:complexType>
<xs:sequence>
<xs:element name="valuesAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="subjectsDetails">
<xs:complexType>
<xs:sequence>
<xs:element name="Subject" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="ID"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="errors">
</xs:element>
<xs:element name="summaryOfSpecimenTakenTimes">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="total"/>
<xs:element type="xs:byte" name="blank"/>
<xs:element type="xs:float" name="percent"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>''')
return
def test_create_summary_report(self):
sys.path.append('config')
self.newpath = proj_root+'config'
self.configFolderCreatedNow = False
if not os.path.exists(self.newpath):
self.configFolderCreatedNow = True
os.makedirs(self.newpath)
result = redi.create_summary_report(\
self.test_report_params, \
self.test_report_data, \
self.test_alert_summary, \
self.specimen_taken_time_summary)
result_string = etree.tostring(result)
#print result_string
xmlschema_doc = etree.parse(self.schema_str)
xml_schema = etree.XMLSchema(xmlschema_doc)
# validate the xml against the xsd schema
self.assertEqual(xml_schema.validate(result), True)
# validate the actual data in xml but strip the white space first
parser = etree.XMLParser(remove_blank_text=True)
clean_tree = etree.XML(self.expected_xml, parser=parser)
self.expected_xml = etree.tostring(clean_tree)
self.assertEqual(self.expected_xml, result_string)
def tearDown(self):
# delete the created xml file
with open(proj_root + 'config/report.xml'):
os.remove(proj_root + 'config/report.xml')
if self.configFolderCreatedNow:
os.rmdir(self.newpath)
return
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "f9dd21aac7915b9bbf91eeffb5fd58ffdb43c6c3",
"index": 5857,
"step-1": "<mask token>\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element 
type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 
'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element 
type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 
'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-3": "<mask token>\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, '../')\nproj_root = os.path.abspath(goal_dir) + '/'\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element 
name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = 
etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nimport os\nimport sys\nfrom lxml import etree\nfrom StringIO import StringIO\nimport time\nimport redi\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, '../')\nproj_root = os.path.abspath(goal_dir) + '/'\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema 
attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n 
parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "'''\nUnit test for `redi.create_summary_report()`\n'''\nimport unittest\nimport os\nimport sys\nfrom lxml import etree\nfrom StringIO import StringIO\nimport time\nimport redi\n\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, \"../\")\nproj_root = os.path.abspath(goal_dir)+'/'\n\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {\n 'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n\n self.test_report_data = {\n 'total_subjects': 5,\n 'form_details': {\n 'Total_chemistry_Forms': 22,\n 'Total_cbc_Forms': 53\n },\n 'subject_details': {\n '60': {'cbc_Forms': 1, 'chemistry_Forms': 1},\n '61': {'cbc_Forms': 2, 'chemistry_Forms': 1},\n '63': {'cbc_Forms': 11, 'chemistry_Forms': 4},\n '59': {'cbc_Forms': 39, 'chemistry_Forms': 16}\n },\n 'errors' : [],\n }\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {\n 'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'],\n 'max_event_alert': [\n 'This is max event alert 1',\n 'This is max event alert 2',\n 'This is max event alert 3']\n }\n self.expected_xml = '''\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>'''+time.strftime(\"%m/%d/%Y\")+'''</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n 
</summaryOfSpecimenTakenTimes>\n</report>'''\n\n self.schema_str = StringIO('''\\\n <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>''')\n return\n\n def test_create_summary_report(self):\n\n sys.path.append('config')\n self.newpath = proj_root+'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n\n result = redi.create_summary_report(\\\n self.test_report_params, \\\n self.test_report_data, \\\n self.test_alert_summary, \\\n self.specimen_taken_time_summary)\n result_string = etree.tostring(result)\n #print result_string\n xmlschema_doc = 
etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n # validate the xml against the xsd schema\n self.assertEqual(xml_schema.validate(result), True)\n # validate the actual data in xml but strip the white space first\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n # delete the created xml file\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
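The test in the record above builds an XSD in memory and validates the generated report element with lxml's XMLSchema. A stripped-down sketch of that validation pattern; the toy schema and document below are placeholders, not the real report format.

# Toy version of the lxml schema-validation step used in the test above.
from io import StringIO             # the original test imports the Python 2 StringIO module instead
from lxml import etree

schema_doc = etree.parse(StringIO(
    '<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">'
    '<xs:element name="report" type="xs:string"/>'
    '</xs:schema>'))
schema = etree.XMLSchema(schema_doc)

report = etree.fromstring('<report>ok</report>')
print(schema.validate(report))      # True when the document matches the schema
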
<|reserved_special_token_0|>
class MainWindow(QMainWindow):
playSong = QtCore.pyqtSignal(str)
def __init__(self, music_dir):
super(MainWindow, self).__init__()
self.__music_dir = music_dir
self.resize(400, 70)
self.move(0, 0)
self.setWindowTitle('Drink')
self.setWindowIcon(QIcon('icon.png'))
self.controls = Controls(self)
self.setCentralWidget(self.controls)
self.controls.openButton.clicked.connect(self.open)
self.show()
def open(self):
try:
fileName = QFileDialog.getOpenFileName(self, 'Open', self.
__music_dir, 'Mp3 Files (*.mp3)')
self.playSong.emit(fileName)
except Exception as error:
QMessageBox.critical(self, 'Open error', error.message)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Controls(QWidget):
<|reserved_special_token_0|>
class MainWindow(QMainWindow):
playSong = QtCore.pyqtSignal(str)
def __init__(self, music_dir):
super(MainWindow, self).__init__()
self.__music_dir = music_dir
self.resize(400, 70)
self.move(0, 0)
self.setWindowTitle('Drink')
self.setWindowIcon(QIcon('icon.png'))
self.controls = Controls(self)
self.setCentralWidget(self.controls)
self.controls.openButton.clicked.connect(self.open)
self.show()
def open(self):
try:
fileName = QFileDialog.getOpenFileName(self, 'Open', self.
__music_dir, 'Mp3 Files (*.mp3)')
self.playSong.emit(fileName)
except Exception as error:
QMessageBox.critical(self, 'Open error', error.message)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Controls(QWidget):
def __init__(self, parent):
super(Controls, self).__init__(parent)
self.layout = QHBoxLayout(self)
self.openButton = QPushButton('Open', self)
self.layout.addWidget(self.openButton)
self.playPauseButton = QPushButton('Play', self)
self.layout.addWidget(self.playPauseButton)
self.nextButton = QPushButton('Next', self)
self.layout.addWidget(self.nextButton)
self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)
self.__nextShortcut.activated.connect(self.nextButton.click)
self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self
)
self.__playPauseShortcut.activated.connect(self.playPauseButton.click)
class MainWindow(QMainWindow):
playSong = QtCore.pyqtSignal(str)
def __init__(self, music_dir):
super(MainWindow, self).__init__()
self.__music_dir = music_dir
self.resize(400, 70)
self.move(0, 0)
self.setWindowTitle('Drink')
self.setWindowIcon(QIcon('icon.png'))
self.controls = Controls(self)
self.setCentralWidget(self.controls)
self.controls.openButton.clicked.connect(self.open)
self.show()
def open(self):
try:
fileName = QFileDialog.getOpenFileName(self, 'Open', self.
__music_dir, 'Mp3 Files (*.mp3)')
self.playSong.emit(fileName)
except Exception as error:
QMessageBox.critical(self, 'Open error', error.message)
<|reserved_special_token_1|>
import random
from PyQt4.QtGui import QWidget, QHBoxLayout, QPushButton, QMainWindow, QIcon, QAction, QShortcut, QKeySequence, QFileDialog, QMessageBox
from PyQt4 import QtCore
class Controls(QWidget):
def __init__(self, parent):
super(Controls, self).__init__(parent)
self.layout = QHBoxLayout(self)
self.openButton = QPushButton('Open', self)
self.layout.addWidget(self.openButton)
self.playPauseButton = QPushButton('Play', self)
self.layout.addWidget(self.playPauseButton)
self.nextButton = QPushButton('Next', self)
self.layout.addWidget(self.nextButton)
self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)
self.__nextShortcut.activated.connect(self.nextButton.click)
self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self
)
self.__playPauseShortcut.activated.connect(self.playPauseButton.click)
class MainWindow(QMainWindow):
playSong = QtCore.pyqtSignal(str)
def __init__(self, music_dir):
super(MainWindow, self).__init__()
self.__music_dir = music_dir
self.resize(400, 70)
self.move(0, 0)
self.setWindowTitle('Drink')
self.setWindowIcon(QIcon('icon.png'))
self.controls = Controls(self)
self.setCentralWidget(self.controls)
self.controls.openButton.clicked.connect(self.open)
self.show()
def open(self):
try:
fileName = QFileDialog.getOpenFileName(self, 'Open', self.
__music_dir, 'Mp3 Files (*.mp3)')
self.playSong.emit(fileName)
except Exception as error:
QMessageBox.critical(self, 'Open error', error.message)
<|reserved_special_token_1|>
import random
from PyQt4.QtGui import (
QWidget, QHBoxLayout, QPushButton, QMainWindow, QIcon, QAction, QShortcut,
QKeySequence, QFileDialog, QMessageBox)
from PyQt4 import QtCore
class Controls(QWidget):
def __init__(self, parent):
super(Controls, self).__init__(parent)
self.layout = QHBoxLayout(self)
self.openButton = QPushButton('Open', self)
self.layout.addWidget(self.openButton)
self.playPauseButton = QPushButton('Play', self) # TODO implement pausing
self.layout.addWidget(self.playPauseButton)
self.nextButton = QPushButton('Next', self)
self.layout.addWidget(self.nextButton)
self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)
self.__nextShortcut.activated.connect(self.nextButton.click)
self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self)
self.__playPauseShortcut.activated.connect(self.playPauseButton.click)
class MainWindow(QMainWindow):
playSong = QtCore.pyqtSignal(str) # arg is path to file
def __init__(self, music_dir):
super(MainWindow, self).__init__()
self.__music_dir = music_dir
self.resize(400, 70)
self.move(0, 0)
self.setWindowTitle('Drink')
self.setWindowIcon(QIcon('icon.png'))
self.controls = Controls(self)
self.setCentralWidget(self.controls)
self.controls.openButton.clicked.connect(self.open)
self.show()
def open(self):
try:
fileName = QFileDialog.getOpenFileName(
self, "Open", self.__music_dir, "Mp3 Files (*.mp3)")
self.playSong.emit(fileName)
except Exception as error:
QMessageBox.critical(self, "Open error", error.message)
|
flexible
|
{
"blob_id": "4e86dd74374297c3b0ce8fea93910003dac7d5d7",
"index": 8742,
"step-1": "<mask token>\n\n\nclass MainWindow(QMainWindow):\n playSong = QtCore.pyqtSignal(str)\n\n def __init__(self, music_dir):\n super(MainWindow, self).__init__()\n self.__music_dir = music_dir\n self.resize(400, 70)\n self.move(0, 0)\n self.setWindowTitle('Drink')\n self.setWindowIcon(QIcon('icon.png'))\n self.controls = Controls(self)\n self.setCentralWidget(self.controls)\n self.controls.openButton.clicked.connect(self.open)\n self.show()\n\n def open(self):\n try:\n fileName = QFileDialog.getOpenFileName(self, 'Open', self.\n __music_dir, 'Mp3 Files (*.mp3)')\n self.playSong.emit(fileName)\n except Exception as error:\n QMessageBox.critical(self, 'Open error', error.message)\n",
"step-2": "<mask token>\n\n\nclass Controls(QWidget):\n <mask token>\n\n\nclass MainWindow(QMainWindow):\n playSong = QtCore.pyqtSignal(str)\n\n def __init__(self, music_dir):\n super(MainWindow, self).__init__()\n self.__music_dir = music_dir\n self.resize(400, 70)\n self.move(0, 0)\n self.setWindowTitle('Drink')\n self.setWindowIcon(QIcon('icon.png'))\n self.controls = Controls(self)\n self.setCentralWidget(self.controls)\n self.controls.openButton.clicked.connect(self.open)\n self.show()\n\n def open(self):\n try:\n fileName = QFileDialog.getOpenFileName(self, 'Open', self.\n __music_dir, 'Mp3 Files (*.mp3)')\n self.playSong.emit(fileName)\n except Exception as error:\n QMessageBox.critical(self, 'Open error', error.message)\n",
"step-3": "<mask token>\n\n\nclass Controls(QWidget):\n\n def __init__(self, parent):\n super(Controls, self).__init__(parent)\n self.layout = QHBoxLayout(self)\n self.openButton = QPushButton('Open', self)\n self.layout.addWidget(self.openButton)\n self.playPauseButton = QPushButton('Play', self)\n self.layout.addWidget(self.playPauseButton)\n self.nextButton = QPushButton('Next', self)\n self.layout.addWidget(self.nextButton)\n self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)\n self.__nextShortcut.activated.connect(self.nextButton.click)\n self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self\n )\n self.__playPauseShortcut.activated.connect(self.playPauseButton.click)\n\n\nclass MainWindow(QMainWindow):\n playSong = QtCore.pyqtSignal(str)\n\n def __init__(self, music_dir):\n super(MainWindow, self).__init__()\n self.__music_dir = music_dir\n self.resize(400, 70)\n self.move(0, 0)\n self.setWindowTitle('Drink')\n self.setWindowIcon(QIcon('icon.png'))\n self.controls = Controls(self)\n self.setCentralWidget(self.controls)\n self.controls.openButton.clicked.connect(self.open)\n self.show()\n\n def open(self):\n try:\n fileName = QFileDialog.getOpenFileName(self, 'Open', self.\n __music_dir, 'Mp3 Files (*.mp3)')\n self.playSong.emit(fileName)\n except Exception as error:\n QMessageBox.critical(self, 'Open error', error.message)\n",
"step-4": "import random\nfrom PyQt4.QtGui import QWidget, QHBoxLayout, QPushButton, QMainWindow, QIcon, QAction, QShortcut, QKeySequence, QFileDialog, QMessageBox\nfrom PyQt4 import QtCore\n\n\nclass Controls(QWidget):\n\n def __init__(self, parent):\n super(Controls, self).__init__(parent)\n self.layout = QHBoxLayout(self)\n self.openButton = QPushButton('Open', self)\n self.layout.addWidget(self.openButton)\n self.playPauseButton = QPushButton('Play', self)\n self.layout.addWidget(self.playPauseButton)\n self.nextButton = QPushButton('Next', self)\n self.layout.addWidget(self.nextButton)\n self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)\n self.__nextShortcut.activated.connect(self.nextButton.click)\n self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self\n )\n self.__playPauseShortcut.activated.connect(self.playPauseButton.click)\n\n\nclass MainWindow(QMainWindow):\n playSong = QtCore.pyqtSignal(str)\n\n def __init__(self, music_dir):\n super(MainWindow, self).__init__()\n self.__music_dir = music_dir\n self.resize(400, 70)\n self.move(0, 0)\n self.setWindowTitle('Drink')\n self.setWindowIcon(QIcon('icon.png'))\n self.controls = Controls(self)\n self.setCentralWidget(self.controls)\n self.controls.openButton.clicked.connect(self.open)\n self.show()\n\n def open(self):\n try:\n fileName = QFileDialog.getOpenFileName(self, 'Open', self.\n __music_dir, 'Mp3 Files (*.mp3)')\n self.playSong.emit(fileName)\n except Exception as error:\n QMessageBox.critical(self, 'Open error', error.message)\n",
"step-5": "import random\r\n\r\nfrom PyQt4.QtGui import (\r\n QWidget, QHBoxLayout, QPushButton, QMainWindow, QIcon, QAction, QShortcut,\r\n QKeySequence, QFileDialog, QMessageBox)\r\nfrom PyQt4 import QtCore\r\n\r\nclass Controls(QWidget):\r\n def __init__(self, parent): \r\n super(Controls, self).__init__(parent)\r\n self.layout = QHBoxLayout(self)\r\n\r\n self.openButton = QPushButton('Open', self)\r\n self.layout.addWidget(self.openButton)\r\n\r\n self.playPauseButton = QPushButton('Play', self) # TODO implement pausing\r\n self.layout.addWidget(self.playPauseButton)\r\n\r\n self.nextButton = QPushButton('Next', self)\r\n self.layout.addWidget(self.nextButton)\r\n \r\n self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)\r\n self.__nextShortcut.activated.connect(self.nextButton.click)\r\n\r\n self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self)\r\n self.__playPauseShortcut.activated.connect(self.playPauseButton.click)\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n playSong = QtCore.pyqtSignal(str) # arg is path to file\r\n\r\n def __init__(self, music_dir):\r\n super(MainWindow, self).__init__()\r\n\r\n self.__music_dir = music_dir\r\n\r\n self.resize(400, 70)\r\n self.move(0, 0)\r\n self.setWindowTitle('Drink')\r\n self.setWindowIcon(QIcon('icon.png'))\r\n \r\n self.controls = Controls(self)\r\n self.setCentralWidget(self.controls)\r\n\r\n self.controls.openButton.clicked.connect(self.open)\r\n\r\n self.show()\r\n\r\n def open(self):\r\n try:\r\n fileName = QFileDialog.getOpenFileName(\r\n self, \"Open\", self.__music_dir, \"Mp3 Files (*.mp3)\")\r\n self.playSong.emit(fileName)\r\n except Exception as error:\r\n QMessageBox.critical(self, \"Open error\", error.message)\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def det_cell_king(field):
global cell_king
cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,
fig in enumerate(row) if abs(fig) == 6}
return cell_king
<|reserved_special_token_0|>
def rook(field, color, old, new, d):
global castling_control
hor = 0 if color == 1 else 7
cont = castling_control[color]
x, y = old if d == 1 else new
if x == hor and y % 7 == 0:
castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1
), cont[2] + d * (sign(y - 3) + 1)
def trans_pawn(color, old):
return True if old[0] * color % 7 == 6 else False
def take_on_aisle_pawn(color, old, new):
global take_on_aisle
if abs(new[0] - old[0]) == 2:
take_on_aisle = color, new[1]
else:
take_on_aisle = 'l', 8
return take_on_aisle
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def start_parameter_2(par):
global cell_king, castling_control, trans, take_on_aisle
cell_king = par[0]
castling_control = par[1]
trans = par[2]
take_on_aisle = par[3]
def det_cell_king(field):
global cell_king
cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,
fig in enumerate(row) if abs(fig) == 6}
return cell_king
def det_castling_control(field):
global castling_control
for color in (1, -1):
hor = 0 if color == 1 else 7
dk = 0 if field[hor][4] == 6 * color else 1
dlr = 0 if field[hor][0] == 2 * color else 1
drr = 0 if field[hor][-1] == 2 * color else 1
castling_control[color] = dk, dlr, drr
return castling_control
<|reserved_special_token_0|>
def rook(field, color, old, new, d):
global castling_control
hor = 0 if color == 1 else 7
cont = castling_control[color]
x, y = old if d == 1 else new
if x == hor and y % 7 == 0:
castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1
), cont[2] + d * (sign(y - 3) + 1)
def trans_pawn(color, old):
return True if old[0] * color % 7 == 6 else False
def take_on_aisle_pawn(color, old, new):
global take_on_aisle
if abs(new[0] - old[0]) == 2:
take_on_aisle = color, new[1]
else:
take_on_aisle = 'l', 8
return take_on_aisle
def take_on_aisle_move(field, color, old, new, fig, d, main):
global take_on_aisle
if main == 1:
take_on_aisle_pawn(color, old, new)
if abs(old[1] - new[1]) == 1:
if field[new[0]][new[1]] == 0 and d == 1:
field[old[0]][new[1]] = 0
if fig == 0 and d == -1:
field[new[0]][old[1]] = -color
def move(field, old, new, fig=0, d=1, trans_fig=1, main=0):
global trans, take_on_aisle
color = sign(field[old[0]][old[1]])
figure = abs(field[old[0]][old[1]])
if figure == 2:
rook(field, color, old, new, d)
if figure == 6:
king_and_castling(field, color, old, new, d)
if trans == True:
figure = 1
trans = False
if figure == 1:
trans = trans_pawn(color, old) if d == 1 else False
if trans == True:
figure = trans_fig
take_on_aisle_move(field, color, old, new, fig, d, main)
if main == 1:
trans = False
field[new[0]][new[1]] = color * figure
field[old[0]][old[1]] = fig
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def start_parameter_2(par):
global cell_king, castling_control, trans, take_on_aisle
cell_king = par[0]
castling_control = par[1]
trans = par[2]
take_on_aisle = par[3]
def det_cell_king(field):
global cell_king
cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,
fig in enumerate(row) if abs(fig) == 6}
return cell_king
def det_castling_control(field):
global castling_control
for color in (1, -1):
hor = 0 if color == 1 else 7
dk = 0 if field[hor][4] == 6 * color else 1
dlr = 0 if field[hor][0] == 2 * color else 1
drr = 0 if field[hor][-1] == 2 * color else 1
castling_control[color] = dk, dlr, drr
return castling_control
def king_and_castling(field, color, old, new, d):
global cell_king, castling_control
cell_king[color] = new[0], new[1]
storlg = new[1] - old[1]
if abs(storlg) == 2:
storlg = sign(storlg)
rp = 7 if storlg * d == 1 else 0
field[new[0]][new[1] - storlg] = 2 * color if d == 1 else 0
field[new[0]][rp] = 0 if d == 1 else 2 * color
cont = castling_control[color]
castling_control[color] = cont[0], cont[1] - storlg + d, cont[2
] + storlg + d
castling_control[color] = castling_control[color][0] + d, castling_control[
color][1], castling_control[color][2]
def rook(field, color, old, new, d):
global castling_control
hor = 0 if color == 1 else 7
cont = castling_control[color]
x, y = old if d == 1 else new
if x == hor and y % 7 == 0:
castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1
), cont[2] + d * (sign(y - 3) + 1)
def trans_pawn(color, old):
return True if old[0] * color % 7 == 6 else False
def take_on_aisle_pawn(color, old, new):
global take_on_aisle
if abs(new[0] - old[0]) == 2:
take_on_aisle = color, new[1]
else:
take_on_aisle = 'l', 8
return take_on_aisle
def take_on_aisle_move(field, color, old, new, fig, d, main):
global take_on_aisle
if main == 1:
take_on_aisle_pawn(color, old, new)
if abs(old[1] - new[1]) == 1:
if field[new[0]][new[1]] == 0 and d == 1:
field[old[0]][new[1]] = 0
if fig == 0 and d == -1:
field[new[0]][old[1]] = -color
def move(field, old, new, fig=0, d=1, trans_fig=1, main=0):
global trans, take_on_aisle
color = sign(field[old[0]][old[1]])
figure = abs(field[old[0]][old[1]])
if figure == 2:
rook(field, color, old, new, d)
if figure == 6:
king_and_castling(field, color, old, new, d)
if trans == True:
figure = 1
trans = False
if figure == 1:
trans = trans_pawn(color, old) if d == 1 else False
if trans == True:
figure = trans_fig
take_on_aisle_move(field, color, old, new, fig, d, main)
if main == 1:
trans = False
field[new[0]][new[1]] = color * figure
field[old[0]][old[1]] = fig
<|reserved_special_token_1|>
from field import print_field
from math_utilite import sign, col
def start_parameter_2(par):
global cell_king, castling_control, trans, take_on_aisle
cell_king = par[0]
castling_control = par[1]
trans = par[2]
take_on_aisle = par[3]
def det_cell_king(field):
global cell_king
cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,
fig in enumerate(row) if abs(fig) == 6}
return cell_king
def det_castling_control(field):
global castling_control
for color in (1, -1):
hor = 0 if color == 1 else 7
dk = 0 if field[hor][4] == 6 * color else 1
dlr = 0 if field[hor][0] == 2 * color else 1
drr = 0 if field[hor][-1] == 2 * color else 1
castling_control[color] = dk, dlr, drr
return castling_control
def king_and_castling(field, color, old, new, d):
global cell_king, castling_control
cell_king[color] = new[0], new[1]
storlg = new[1] - old[1]
if abs(storlg) == 2:
storlg = sign(storlg)
rp = 7 if storlg * d == 1 else 0
field[new[0]][new[1] - storlg] = 2 * color if d == 1 else 0
field[new[0]][rp] = 0 if d == 1 else 2 * color
cont = castling_control[color]
castling_control[color] = cont[0], cont[1] - storlg + d, cont[2
] + storlg + d
castling_control[color] = castling_control[color][0] + d, castling_control[
color][1], castling_control[color][2]
def rook(field, color, old, new, d):
global castling_control
hor = 0 if color == 1 else 7
cont = castling_control[color]
x, y = old if d == 1 else new
if x == hor and y % 7 == 0:
castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1
), cont[2] + d * (sign(y - 3) + 1)
def trans_pawn(color, old):
return True if old[0] * color % 7 == 6 else False
def take_on_aisle_pawn(color, old, new):
global take_on_aisle
if abs(new[0] - old[0]) == 2:
take_on_aisle = color, new[1]
else:
take_on_aisle = 'l', 8
return take_on_aisle
def take_on_aisle_move(field, color, old, new, fig, d, main):
global take_on_aisle
if main == 1:
take_on_aisle_pawn(color, old, new)
if abs(old[1] - new[1]) == 1:
if field[new[0]][new[1]] == 0 and d == 1:
field[old[0]][new[1]] = 0
if fig == 0 and d == -1:
field[new[0]][old[1]] = -color
def move(field, old, new, fig=0, d=1, trans_fig=1, main=0):
global trans, take_on_aisle
color = sign(field[old[0]][old[1]])
figure = abs(field[old[0]][old[1]])
if figure == 2:
rook(field, color, old, new, d)
if figure == 6:
king_and_castling(field, color, old, new, d)
if trans == True:
figure = 1
trans = False
if figure == 1:
trans = trans_pawn(color, old) if d == 1 else False
if trans == True:
figure = trans_fig
take_on_aisle_move(field, color, old, new, fig, d, main)
if main == 1:
trans = False
field[new[0]][new[1]] = color * figure
field[old[0]][old[1]] = fig
<|reserved_special_token_1|>
from field import print_field
from math_utilite import sign, col
def start_parameter_2(par):
global cell_king, castling_control, trans, take_on_aisle
cell_king = par[0]
castling_control = par[1]
trans = par[2]
take_on_aisle = par[3]
def det_cell_king(field):
global cell_king
cell_king = {sign(fig):(x, y) for x, row in enumerate(field) for y, fig in enumerate(row) if abs(fig)==6}
return cell_king
def det_castling_control(field):
global castling_control
for color in (1, -1):
hor = 0 if color == 1 else 7
dk = 0 if field[hor][4] == 6*color else 1
dlr = 0 if field[hor][0] == 2*color else 1
drr = 0 if field[hor][-1] == 2*color else 1
castling_control[color] = (dk, dlr, drr)
return castling_control
def king_and_castling(field, color, old, new, d):
global cell_king, castling_control
cell_king[color] = (new[0], new[1])
storlg=new[1]-old[1]
if abs(storlg) == 2:
storlg = sign(storlg)
rp = 7 if storlg*d == 1 else 0
field[new[0]][new[1]-storlg] = 2*color if d == 1 else 0
field[new[0]][rp] = 0 if d == 1 else 2*color
cont = castling_control[color]
castling_control[color] = (cont[0], cont[1]-storlg+d, cont[2]+storlg+d)
castling_control[color] = (castling_control[color][0]+d, castling_control[color][1], castling_control[color][2])
def rook(field, color, old, new, d):
global castling_control
hor = 0 if color == 1 else 7
cont = castling_control[color]
x, y = old if d == 1 else new
if x == hor and y % 7 == 0:
castling_control[color] = (cont[0], cont[1] + d*(-sign(y-3)+1), cont[2] + d*(sign(y-3)+1))
def trans_pawn(color, old):
return True if (old[0] * color) % 7 == 6 else False
def take_on_aisle_pawn(color, old, new):
global take_on_aisle
if abs(new[0]-old[0]) == 2:
take_on_aisle = (color, new[1])
else:
take_on_aisle = ('l', 8)
return take_on_aisle
def take_on_aisle_move(field, color, old, new, fig, d, main):
global take_on_aisle
if main == 1:
take_on_aisle_pawn(color, old, new)
if abs(old[1]-new[1]) == 1:
if field[new[0]][new[1]] == 0 and d == 1:
field[old[0]][new[1]] = 0
if fig == 0 and d == -1:
field[new[0]][old[1]] = -color
def move(field, old, new, fig=0, d=1, trans_fig=1, main=0):
global trans, take_on_aisle
color = sign(field[old[0]][old[1]])
figure = abs(field[old[0]][old[1]])
if figure == 2:
rook(field, color, old, new, d)
if figure == 6:
king_and_castling(field, color, old, new, d)
if trans == True:
figure = 1
trans = False
if figure == 1:
trans = trans_pawn(color, old) if d == 1 else False
if trans == True:
figure = trans_fig
take_on_aisle_move(field, color, old, new, fig, d, main)
if main == 1:
trans = False
field[new[0]][new[1]] = color*figure
field[old[0]][old[1]] = fig
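# Illustrative sketch (assumption, not part of the original module): figures are encoded
# as signed integers (pawn=1, rook=2, ..., king=6) with the sign giving the colour, and
# white's home rank is row 0, so advancing the white e-pawn two squares would be
#     move(field, (1, 4), (3, 4), main=1)
# which also records the pawn's file for a possible en-passant capture (take_on_aisle_pawn).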
|
flexible
|
{
"blob_id": "90c9456bf22745d99fa76dbc752beae1a3835682",
"index": 7672,
"step-1": "<mask token>\n\n\ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,\n fig in enumerate(row) if abs(fig) == 6}\n return cell_king\n\n\n<mask token>\n\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1\n ), cont[2] + d * (sign(y - 3) + 1)\n\n\ndef trans_pawn(color, old):\n return True if old[0] * color % 7 == 6 else False\n\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0] - old[0]) == 2:\n take_on_aisle = color, new[1]\n else:\n take_on_aisle = 'l', 8\n return take_on_aisle\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef start_parameter_2(par):\n global cell_king, castling_control, trans, take_on_aisle\n cell_king = par[0]\n castling_control = par[1]\n trans = par[2]\n take_on_aisle = par[3]\n\n\ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,\n fig in enumerate(row) if abs(fig) == 6}\n return cell_king\n\n\ndef det_castling_control(field):\n global castling_control\n for color in (1, -1):\n hor = 0 if color == 1 else 7\n dk = 0 if field[hor][4] == 6 * color else 1\n dlr = 0 if field[hor][0] == 2 * color else 1\n drr = 0 if field[hor][-1] == 2 * color else 1\n castling_control[color] = dk, dlr, drr\n return castling_control\n\n\n<mask token>\n\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1\n ), cont[2] + d * (sign(y - 3) + 1)\n\n\ndef trans_pawn(color, old):\n return True if old[0] * color % 7 == 6 else False\n\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0] - old[0]) == 2:\n take_on_aisle = color, new[1]\n else:\n take_on_aisle = 'l', 8\n return take_on_aisle\n\n\ndef take_on_aisle_move(field, color, old, new, fig, d, main):\n global take_on_aisle\n if main == 1:\n take_on_aisle_pawn(color, old, new)\n if abs(old[1] - new[1]) == 1:\n if field[new[0]][new[1]] == 0 and d == 1:\n field[old[0]][new[1]] = 0\n if fig == 0 and d == -1:\n field[new[0]][old[1]] = -color\n\n\ndef move(field, old, new, fig=0, d=1, trans_fig=1, main=0):\n global trans, take_on_aisle\n color = sign(field[old[0]][old[1]])\n figure = abs(field[old[0]][old[1]])\n if figure == 2:\n rook(field, color, old, new, d)\n if figure == 6:\n king_and_castling(field, color, old, new, d)\n if trans == True:\n figure = 1\n trans = False\n if figure == 1:\n trans = trans_pawn(color, old) if d == 1 else False\n if trans == True:\n figure = trans_fig\n take_on_aisle_move(field, color, old, new, fig, d, main)\n if main == 1:\n trans = False\n field[new[0]][new[1]] = color * figure\n field[old[0]][old[1]] = fig\n",
"step-3": "<mask token>\n\n\ndef start_parameter_2(par):\n global cell_king, castling_control, trans, take_on_aisle\n cell_king = par[0]\n castling_control = par[1]\n trans = par[2]\n take_on_aisle = par[3]\n\n\ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,\n fig in enumerate(row) if abs(fig) == 6}\n return cell_king\n\n\ndef det_castling_control(field):\n global castling_control\n for color in (1, -1):\n hor = 0 if color == 1 else 7\n dk = 0 if field[hor][4] == 6 * color else 1\n dlr = 0 if field[hor][0] == 2 * color else 1\n drr = 0 if field[hor][-1] == 2 * color else 1\n castling_control[color] = dk, dlr, drr\n return castling_control\n\n\ndef king_and_castling(field, color, old, new, d):\n global cell_king, castling_control\n cell_king[color] = new[0], new[1]\n storlg = new[1] - old[1]\n if abs(storlg) == 2:\n storlg = sign(storlg)\n rp = 7 if storlg * d == 1 else 0\n field[new[0]][new[1] - storlg] = 2 * color if d == 1 else 0\n field[new[0]][rp] = 0 if d == 1 else 2 * color\n cont = castling_control[color]\n castling_control[color] = cont[0], cont[1] - storlg + d, cont[2\n ] + storlg + d\n castling_control[color] = castling_control[color][0] + d, castling_control[\n color][1], castling_control[color][2]\n\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1\n ), cont[2] + d * (sign(y - 3) + 1)\n\n\ndef trans_pawn(color, old):\n return True if old[0] * color % 7 == 6 else False\n\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0] - old[0]) == 2:\n take_on_aisle = color, new[1]\n else:\n take_on_aisle = 'l', 8\n return take_on_aisle\n\n\ndef take_on_aisle_move(field, color, old, new, fig, d, main):\n global take_on_aisle\n if main == 1:\n take_on_aisle_pawn(color, old, new)\n if abs(old[1] - new[1]) == 1:\n if field[new[0]][new[1]] == 0 and d == 1:\n field[old[0]][new[1]] = 0\n if fig == 0 and d == -1:\n field[new[0]][old[1]] = -color\n\n\ndef move(field, old, new, fig=0, d=1, trans_fig=1, main=0):\n global trans, take_on_aisle\n color = sign(field[old[0]][old[1]])\n figure = abs(field[old[0]][old[1]])\n if figure == 2:\n rook(field, color, old, new, d)\n if figure == 6:\n king_and_castling(field, color, old, new, d)\n if trans == True:\n figure = 1\n trans = False\n if figure == 1:\n trans = trans_pawn(color, old) if d == 1 else False\n if trans == True:\n figure = trans_fig\n take_on_aisle_move(field, color, old, new, fig, d, main)\n if main == 1:\n trans = False\n field[new[0]][new[1]] = color * figure\n field[old[0]][old[1]] = fig\n",
"step-4": "from field import print_field\nfrom math_utilite import sign, col\n\n\ndef start_parameter_2(par):\n global cell_king, castling_control, trans, take_on_aisle\n cell_king = par[0]\n castling_control = par[1]\n trans = par[2]\n take_on_aisle = par[3]\n\n\ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig): (x, y) for x, row in enumerate(field) for y,\n fig in enumerate(row) if abs(fig) == 6}\n return cell_king\n\n\ndef det_castling_control(field):\n global castling_control\n for color in (1, -1):\n hor = 0 if color == 1 else 7\n dk = 0 if field[hor][4] == 6 * color else 1\n dlr = 0 if field[hor][0] == 2 * color else 1\n drr = 0 if field[hor][-1] == 2 * color else 1\n castling_control[color] = dk, dlr, drr\n return castling_control\n\n\ndef king_and_castling(field, color, old, new, d):\n global cell_king, castling_control\n cell_king[color] = new[0], new[1]\n storlg = new[1] - old[1]\n if abs(storlg) == 2:\n storlg = sign(storlg)\n rp = 7 if storlg * d == 1 else 0\n field[new[0]][new[1] - storlg] = 2 * color if d == 1 else 0\n field[new[0]][rp] = 0 if d == 1 else 2 * color\n cont = castling_control[color]\n castling_control[color] = cont[0], cont[1] - storlg + d, cont[2\n ] + storlg + d\n castling_control[color] = castling_control[color][0] + d, castling_control[\n color][1], castling_control[color][2]\n\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = cont[0], cont[1] + d * (-sign(y - 3) + 1\n ), cont[2] + d * (sign(y - 3) + 1)\n\n\ndef trans_pawn(color, old):\n return True if old[0] * color % 7 == 6 else False\n\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0] - old[0]) == 2:\n take_on_aisle = color, new[1]\n else:\n take_on_aisle = 'l', 8\n return take_on_aisle\n\n\ndef take_on_aisle_move(field, color, old, new, fig, d, main):\n global take_on_aisle\n if main == 1:\n take_on_aisle_pawn(color, old, new)\n if abs(old[1] - new[1]) == 1:\n if field[new[0]][new[1]] == 0 and d == 1:\n field[old[0]][new[1]] = 0\n if fig == 0 and d == -1:\n field[new[0]][old[1]] = -color\n\n\ndef move(field, old, new, fig=0, d=1, trans_fig=1, main=0):\n global trans, take_on_aisle\n color = sign(field[old[0]][old[1]])\n figure = abs(field[old[0]][old[1]])\n if figure == 2:\n rook(field, color, old, new, d)\n if figure == 6:\n king_and_castling(field, color, old, new, d)\n if trans == True:\n figure = 1\n trans = False\n if figure == 1:\n trans = trans_pawn(color, old) if d == 1 else False\n if trans == True:\n figure = trans_fig\n take_on_aisle_move(field, color, old, new, fig, d, main)\n if main == 1:\n trans = False\n field[new[0]][new[1]] = color * figure\n field[old[0]][old[1]] = fig\n",
"step-5": "from field import print_field\nfrom math_utilite import sign, col\n\n\ndef start_parameter_2(par):\n global cell_king, castling_control, trans, take_on_aisle\n cell_king = par[0]\n castling_control = par[1]\n trans = par[2]\n take_on_aisle = par[3]\n \ndef det_cell_king(field):\n global cell_king\n cell_king = {sign(fig):(x, y) for x, row in enumerate(field) for y, fig in enumerate(row) if abs(fig)==6}\n return cell_king\n\ndef det_castling_control(field):\n global castling_control\n for color in (1, -1):\n hor = 0 if color == 1 else 7\n dk = 0 if field[hor][4] == 6*color else 1\n dlr = 0 if field[hor][0] == 2*color else 1\n drr = 0 if field[hor][-1] == 2*color else 1\n castling_control[color] = (dk, dlr, drr)\n return castling_control\n \n \ndef king_and_castling(field, color, old, new, d):\n global cell_king, castling_control\n cell_king[color] = (new[0], new[1])\n storlg=new[1]-old[1]\n if abs(storlg) == 2:\n storlg = sign(storlg)\n rp = 7 if storlg*d == 1 else 0\n field[new[0]][new[1]-storlg] = 2*color if d == 1 else 0\n field[new[0]][rp] = 0 if d == 1 else 2*color\n cont = castling_control[color] \n castling_control[color] = (cont[0], cont[1]-storlg+d, cont[2]+storlg+d)\n castling_control[color] = (castling_control[color][0]+d, castling_control[color][1], castling_control[color][2])\n\ndef rook(field, color, old, new, d):\n global castling_control\n hor = 0 if color == 1 else 7\n cont = castling_control[color]\n x, y = old if d == 1 else new\n if x == hor and y % 7 == 0:\n castling_control[color] = (cont[0], cont[1] + d*(-sign(y-3)+1), cont[2] + d*(sign(y-3)+1))\n\ndef trans_pawn(color, old):\n return True if (old[0] * color) % 7 == 6 else False\n\ndef take_on_aisle_pawn(color, old, new):\n global take_on_aisle\n if abs(new[0]-old[0]) == 2:\n take_on_aisle = (color, new[1])\n else:\n take_on_aisle = ('l', 8)\n return take_on_aisle\n\ndef take_on_aisle_move(field, color, old, new, fig, d, main):\n global take_on_aisle\n if main == 1:\n take_on_aisle_pawn(color, old, new)\n if abs(old[1]-new[1]) == 1:\n if field[new[0]][new[1]] == 0 and d == 1:\n field[old[0]][new[1]] = 0\n if fig == 0 and d == -1:\n field[new[0]][old[1]] = -color\n\ndef move(field, old, new, fig=0, d=1, trans_fig=1, main=0):\n global trans, take_on_aisle\n color = sign(field[old[0]][old[1]])\n figure = abs(field[old[0]][old[1]])\n if figure == 2:\n rook(field, color, old, new, d)\n if figure == 6:\n king_and_castling(field, color, old, new, d)\n if trans == True:\n figure = 1\n trans = False\n if figure == 1:\n trans = trans_pawn(color, old) if d == 1 else False \n if trans == True: \n figure = trans_fig \n take_on_aisle_move(field, color, old, new, fig, d, main)\n if main == 1:\n trans = False\n field[new[0]][new[1]] = color*figure\n field[old[0]][old[1]] = fig\n\n\n\n",
"step-ids": [
4,
8,
9,
10,
11
]
}
|
[
4,
8,
9,
10,
11
] |
"""
openAI gym 'cart pole-v0'
"""
import numpy as np
import tensorflow as tf
from collections import deque
import random
import dqn
import gym
import matplotlib.pyplot as plt
# define environment
env = gym.make('CartPole-v0')
# define parameters
INPUT_SIZE = env.observation_space.shape[0]
OUTPUT_SIZE = env.action_space.n
# DISCOUNT_RATE : gamma in the Q-learning target, r + gamma * max_a' Q_target(s', a')
# REPLAY_MEMORY : maximum number of transitions kept in the replay buffer
# BATCH_SIZE : number of transitions sampled per training step
# TARGET_UPDATE_FREQUENCY : copy main network weights into the target network every n steps
# MAX_EPISODE : maximum number of training episodes
DISCOUNT_RATE = 0.9
REPLAY_MEMORY = 50000
BATCH_SIZE = 64
TARGET_UPDATE_FREQUENCY = 5
MAX_EPISODE = 1000
# Build ops that copy the main network's trainable variables into the target network
def get_copy_var_ops(src_scope_name:str, dest_scope_name:str)->list:
holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope = src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope = dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
holder.append(dest_var.assign(src_var.value()))
return holder
def replay_train(mainDQN:dqn.DQN, targetDQN:dqn.DQN, train_batch:list)->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE*np.max(targetDQN.predict(next_states), axis=1)*~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X,y)
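# Illustrative sketch (not part of the original script): the vectorized target above,
# written out for a single transition. With DISCOUNT_RATE = 0.9, a non-terminal step
# with reward 1 and best next-state value 10 gets target 1 + 0.9 * 10 = 10.0, while a
# terminal step keeps only its reward because the ~done mask zeroes the bootstrap term.
def _bellman_target_example(reward, max_next_q, done, gamma=DISCOUNT_RATE):
    # Mirrors: rewards + DISCOUNT_RATE * np.max(targetDQN.predict(next_states), axis=1) * ~done
    return reward + gamma * max_next_q * (not done)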
def bot_play(mainDQN:dqn.DQN, env:gym.Env)->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print("\n Total Score : {}".format(reward_sum))
break
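# Note (assumption, not in the original script): bot_play is defined but never called
# below; it could be invoked after training, e.g. bot_play(mainDQN, env) inside the
# tf.Session block, to render one greedy episode with the learned policy.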
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name="main")
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name="target")
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops("main","target")
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1./ ((episode/10)+1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done))
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(" EP : {} | steps : {} | EP loss : {}".format(episode+1, step_count, loss), end="\r")
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward>199:
print("\n game cleared, avg_reward : {}, episode : {}".format(avg_reward, episode+1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1,2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "9a40861239268aa62075b77b3ed452f31bb14fac",
"index": 2458,
"step-1": "<mask token>\n\n\ndef get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:\n holder = []\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n dest_scope_name)\n for src_var, dest_var in zip(src_vars, dest_vars):\n holder.append(dest_var.assign(src_var.value()))\n return holder\n\n\ndef replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list\n ) ->float:\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(\n next_states), axis=1) * ~done\n X = states\n y = mainDQN.predict(states)\n y[np.arange(len(states)), actions] = Q_target\n return mainDQN.update(X, y)\n\n\ndef bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:\n state = env.reset()\n reward_sum = 0\n while True:\n env.render()\n action = np.argmax(mainDQN.predict(state))\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n if done:\n print('\\n Total Score : {}'.format(reward_sum))\n break\n\n\ndef main():\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n last_100 = deque(maxlen=100)\n step_list = []\n loss_list = []\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')\n sess.run(tf.global_variables_initializer())\n copy_ops = get_copy_var_ops('main', 'target')\n sess.run(copy_ops)\n for episode in range(MAX_EPISODE):\n e = 1.0 / (episode / 10 + 1)\n done = False\n step_count = 0\n state = env.reset()\n loss = 0\n while not done:\n if np.random.rand() < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(mainDQN.predict(state))\n next_states, reward, done, _ = env.step(action)\n if done:\n reward = -1\n replay_buffer.append((state, action, reward, next_states, done)\n )\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n state = next_states\n step_count += 1\n print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,\n step_count, loss), end='\\r')\n step_list.append(step_count)\n loss_list.append(loss)\n last_100.append(step_count)\n if len(last_100) == last_100.maxlen:\n avg_reward = np.mean(last_100)\n if avg_reward > 199:\n print('\\n game cleared, avg_reward : {}, episode : {}'.\n format(avg_reward, episode + 1))\n break\n step_array = np.asarray(step_list)\n loss_array = np.asarray(loss_list)\n _, plot = plt.subplots(1, 2)\n plot[0].plot(step_array)\n plot[1].plot(loss_array)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:\n holder = []\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n dest_scope_name)\n for src_var, dest_var in zip(src_vars, dest_vars):\n holder.append(dest_var.assign(src_var.value()))\n return holder\n\n\ndef replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list\n ) ->float:\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(\n next_states), axis=1) * ~done\n X = states\n y = mainDQN.predict(states)\n y[np.arange(len(states)), actions] = Q_target\n return mainDQN.update(X, y)\n\n\ndef bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:\n state = env.reset()\n reward_sum = 0\n while True:\n env.render()\n action = np.argmax(mainDQN.predict(state))\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n if done:\n print('\\n Total Score : {}'.format(reward_sum))\n break\n\n\ndef main():\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n last_100 = deque(maxlen=100)\n step_list = []\n loss_list = []\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')\n sess.run(tf.global_variables_initializer())\n copy_ops = get_copy_var_ops('main', 'target')\n sess.run(copy_ops)\n for episode in range(MAX_EPISODE):\n e = 1.0 / (episode / 10 + 1)\n done = False\n step_count = 0\n state = env.reset()\n loss = 0\n while not done:\n if np.random.rand() < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(mainDQN.predict(state))\n next_states, reward, done, _ = env.step(action)\n if done:\n reward = -1\n replay_buffer.append((state, action, reward, next_states, done)\n )\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n state = next_states\n step_count += 1\n print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,\n step_count, loss), end='\\r')\n step_list.append(step_count)\n loss_list.append(loss)\n last_100.append(step_count)\n if len(last_100) == last_100.maxlen:\n avg_reward = np.mean(last_100)\n if avg_reward > 199:\n print('\\n game cleared, avg_reward : {}, episode : {}'.\n format(avg_reward, episode + 1))\n break\n step_array = np.asarray(step_list)\n loss_array = np.asarray(loss_list)\n _, plot = plt.subplots(1, 2)\n plot[0].plot(step_array)\n plot[1].plot(loss_array)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nenv = gym.make('CartPole-v0')\nINPUT_SIZE = env.observation_space.shape[0]\nOUTPUT_SIZE = env.action_space.n\nDISCOUNT_RATE = 0.9\nREPLAY_MEMORY = 50000\nBATCH_SIZE = 64\nTARGET_UPDATE_FREQUENCY = 5\nMAX_EPISODE = 1000\n\n\ndef get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:\n holder = []\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n dest_scope_name)\n for src_var, dest_var in zip(src_vars, dest_vars):\n holder.append(dest_var.assign(src_var.value()))\n return holder\n\n\ndef replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list\n ) ->float:\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(\n next_states), axis=1) * ~done\n X = states\n y = mainDQN.predict(states)\n y[np.arange(len(states)), actions] = Q_target\n return mainDQN.update(X, y)\n\n\ndef bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:\n state = env.reset()\n reward_sum = 0\n while True:\n env.render()\n action = np.argmax(mainDQN.predict(state))\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n if done:\n print('\\n Total Score : {}'.format(reward_sum))\n break\n\n\ndef main():\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n last_100 = deque(maxlen=100)\n step_list = []\n loss_list = []\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')\n sess.run(tf.global_variables_initializer())\n copy_ops = get_copy_var_ops('main', 'target')\n sess.run(copy_ops)\n for episode in range(MAX_EPISODE):\n e = 1.0 / (episode / 10 + 1)\n done = False\n step_count = 0\n state = env.reset()\n loss = 0\n while not done:\n if np.random.rand() < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(mainDQN.predict(state))\n next_states, reward, done, _ = env.step(action)\n if done:\n reward = -1\n replay_buffer.append((state, action, reward, next_states, done)\n )\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n state = next_states\n step_count += 1\n print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,\n step_count, loss), end='\\r')\n step_list.append(step_count)\n loss_list.append(loss)\n last_100.append(step_count)\n if len(last_100) == last_100.maxlen:\n avg_reward = np.mean(last_100)\n if avg_reward > 199:\n print('\\n game cleared, avg_reward : {}, episode : {}'.\n format(avg_reward, episode + 1))\n break\n step_array = np.asarray(step_list)\n loss_array = np.asarray(loss_list)\n _, plot = plt.subplots(1, 2)\n plot[0].plot(step_array)\n plot[1].plot(loss_array)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\nimport random\nimport dqn\nimport gym\nimport matplotlib.pyplot as plt\nenv = gym.make('CartPole-v0')\nINPUT_SIZE = env.observation_space.shape[0]\nOUTPUT_SIZE = env.action_space.n\nDISCOUNT_RATE = 0.9\nREPLAY_MEMORY = 50000\nBATCH_SIZE = 64\nTARGET_UPDATE_FREQUENCY = 5\nMAX_EPISODE = 1000\n\n\ndef get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:\n holder = []\n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\n dest_scope_name)\n for src_var, dest_var in zip(src_vars, dest_vars):\n holder.append(dest_var.assign(src_var.value()))\n return holder\n\n\ndef replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list\n ) ->float:\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(\n next_states), axis=1) * ~done\n X = states\n y = mainDQN.predict(states)\n y[np.arange(len(states)), actions] = Q_target\n return mainDQN.update(X, y)\n\n\ndef bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:\n state = env.reset()\n reward_sum = 0\n while True:\n env.render()\n action = np.argmax(mainDQN.predict(state))\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n if done:\n print('\\n Total Score : {}'.format(reward_sum))\n break\n\n\ndef main():\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n last_100 = deque(maxlen=100)\n step_list = []\n loss_list = []\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')\n sess.run(tf.global_variables_initializer())\n copy_ops = get_copy_var_ops('main', 'target')\n sess.run(copy_ops)\n for episode in range(MAX_EPISODE):\n e = 1.0 / (episode / 10 + 1)\n done = False\n step_count = 0\n state = env.reset()\n loss = 0\n while not done:\n if np.random.rand() < e:\n action = env.action_space.sample()\n else:\n action = np.argmax(mainDQN.predict(state))\n next_states, reward, done, _ = env.step(action)\n if done:\n reward = -1\n replay_buffer.append((state, action, reward, next_states, done)\n )\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n state = next_states\n step_count += 1\n print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,\n step_count, loss), end='\\r')\n step_list.append(step_count)\n loss_list.append(loss)\n last_100.append(step_count)\n if len(last_100) == last_100.maxlen:\n avg_reward = np.mean(last_100)\n if avg_reward > 199:\n print('\\n game cleared, avg_reward : {}, episode : {}'.\n format(avg_reward, episode + 1))\n break\n step_array = np.asarray(step_list)\n loss_array = np.asarray(loss_list)\n _, plot = plt.subplots(1, 2)\n plot[0].plot(step_array)\n plot[1].plot(loss_array)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nopenAI gym 'cart pole-v0'\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\nimport random\nimport dqn\nimport gym\nimport matplotlib.pyplot as plt\n\n# define environment\nenv = gym.make('CartPole-v0')\n\n# define parameters\nINPUT_SIZE = env.observation_space.shape[0]\nOUTPUT_SIZE = env.action_space.n\n\n# DISCOUNT_RATE : y = (1-dr)x + dr(r+f(x+1))\n# REPLAY_MEMORY : memory size\n# BATCH_SIZE : BATCH- training\n# TARGET_UPDATE_FREQUENCY : targetW <- mainW each n\n# MAX_EPISODE : n of trainning epoch\nDISCOUNT_RATE = 0.9\nREPLAY_MEMORY = 50000\nBATCH_SIZE = 64\nTARGET_UPDATE_FREQUENCY = 5\nMAX_EPISODE = 1000\n\n# copy targetW from mainW values\ndef get_copy_var_ops(src_scope_name:str, dest_scope_name:str)->list:\n\tholder = []\n\tsrc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n\t\tscope = src_scope_name)\n\tdest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n\t\tscope = dest_scope_name)\n\tfor src_var, dest_var in zip(src_vars, dest_vars):\n\t\tholder.append(dest_var.assign(src_var.value()))\n\treturn holder\n\ndef replay_train(mainDQN:dqn.DQN, targetDQN:dqn.DQN, train_batch:list)->float:\n\tstates = np.vstack([x[0] for x in train_batch])\n\tactions = np.array([x[1] for x in train_batch])\n\trewards = np.array([x[2] for x in train_batch])\n\tnext_states = np.vstack([x[3] for x in train_batch])\n\tdone = np.array([x[4] for x in train_batch])\n\n\tQ_target = rewards + DISCOUNT_RATE*np.max(targetDQN.predict(next_states), axis=1)*~done\n\tX = states\n\ty = mainDQN.predict(states)\n\ty[np.arange(len(states)), actions] = Q_target\n\n\treturn mainDQN.update(X,y)\n\ndef bot_play(mainDQN:dqn.DQN, env:gym.Env)->None:\n\tstate = env.reset()\n\treward_sum = 0\n\n\twhile True:\n\t\tenv.render()\n\t\taction = np.argmax(mainDQN.predict(state))\n\t\tstate, reward, done, _ = env.step(action)\n\t\treward_sum += reward\n\n\t\tif done:\n\t\t\tprint(\"\\n Total Score : {}\".format(reward_sum))\n\t\t\tbreak\n\ndef main():\n\treplay_buffer = deque(maxlen=REPLAY_MEMORY)\n\tlast_100 = deque(maxlen=100)\n\tstep_list = []\n\tloss_list = []\n\n\twith tf.Session() as sess:\n\t\tmainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name=\"main\")\n\t\ttargetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name=\"target\")\n\t\tsess.run(tf.global_variables_initializer())\n\n\t\tcopy_ops = get_copy_var_ops(\"main\",\"target\")\n\t\tsess.run(copy_ops)\n\t\t\n\t\tfor episode in range(MAX_EPISODE):\n\t\t\te = 1./ ((episode/10)+1)\n\t\t\tdone = False\n\t\t\tstep_count = 0\n\t\t\tstate = env.reset()\n\t\t\tloss = 0\n\n\t\t\twhile not done:\n\t\t\t\tif np.random.rand() < e:\n\t\t\t\t\taction = env.action_space.sample()\n\t\t\t\telse:\n\t\t\t\t\taction = np.argmax(mainDQN.predict(state))\n\n\t\t\t\tnext_states, reward, done, _ = env.step(action)\n\n\t\t\t\tif done:\n\t\t\t\t\treward = -1\n\t\t\t\treplay_buffer.append((state, action, reward, next_states, done))\n\n\t\t\t\tif len(replay_buffer) > BATCH_SIZE:\n\t\t\t\t\tminibatch = random.sample(replay_buffer, BATCH_SIZE)\n\t\t\t\t\tloss, _ = replay_train(mainDQN, targetDQN, minibatch)\n\n\t\t\t\tif step_count % TARGET_UPDATE_FREQUENCY == 0:\n\t\t\t\t\tsess.run(copy_ops)\n\n\t\t\t\tstate = next_states\n\t\t\t\tstep_count += 1\n\n\t\t\tprint(\" EP : {} | steps : {} | EP loss : {}\".format(episode+1, step_count, loss), end=\"\\r\")\n\n\t\t\tstep_list.append(step_count)\n\t\t\tloss_list.append(loss)\n\t\t\tlast_100.append(step_count)\n\n\t\t\tif len(last_100) == last_100.maxlen:\n\t\t\t\tavg_reward = 
np.mean(last_100)\n\t\t\t\tif avg_reward>199:\n\t\t\t\t\tprint(\"\\n game cleared, avg_reward : {}, episode : {}\".format(avg_reward, episode+1))\n\t\t\t\t\tbreak\n\n\t\tstep_array = np.asarray(step_list)\n\t\tloss_array = np.asarray(loss_list)\n\t\t_, plot = plt.subplots(1,2)\n\t\tplot[0].plot(step_array)\n\t\tplot[1].plot(loss_array)\n\t\tplt.show()\n\nif __name__ == \"__main__\":\n\tmain()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from binance.client import Client
from binance.websockets import BinanceSocketManager
from binance.enums import *
from binance.exceptions import BinanceAPIException
import time
import threading
import winsound
# Replace your_api_key and your_api_secret with your own Binance API key and secret
client = Client(your_api_key, your_api_secret)
# Build the list of BTC-quoted symbols: top 20 by 24h price change with quote volume > 100 BTC
def calculate_data_list():
counter=0
btc='BTC'
symbols=[]
all_positions=[]
positions_final=[]
volume=[]
c=[]
price_change = []
data=client.get_ticker()
for x in range(len(data)):
        if (btc in data[x]['symbol']) and data[x]['symbol'] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':
if float(data[x]['quoteVolume'])>100:
all_positions.append(x)
for x in all_positions:
c.append(float(data[x]['priceChangePercent']))
i = sorted(range(len(c)), key=lambda k: c[k])
i.reverse()
while (len(positions_final) < 20 and len(positions_final) < len(all_positions)):
symbols.append(data[all_positions[i[counter]]]['symbol'])
positions_final.append(all_positions[i[counter]])
volume.append(data[all_positions[i[counter]]]['quoteVolume'])
price_change.append(data[all_positions[i[counter]]]['priceChangePercent'])
counter += 1
return symbols, volume, positions_final, price_change
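# Illustrative sketch (not part of the original script): the index-sorting idiom used
# in calculate_data_list returns positions rather than values, so the biggest gainers
# can be looked up by index in the ticker data.
def _rank_indices_example(changes=(3.2, -1.0, 7.5)):
    order = sorted(range(len(changes)), key=lambda k: changes[k])
    order.reverse() # biggest gainers first -> [2, 0, 1] for the default input
    return order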
# Get candlestick data from Binance
def get_kline():
symbols, volume, pozitii,price_change = calculate_data_list()
prices = []
prices1 = []
k=[]
for x in symbols:
try:
order = client.get_klines( # Get 1 minute candlestick data from server
symbol=x,
interval='1m')
except BinanceAPIException as e:
print (e.status_code)
print (e.message)
try:
order1 = client.get_klines( # Get 15 minute candlestick data from server
symbol=x,
limit= 1000,
interval='15m')
except BinanceAPIException as e:
print (e.status_code)
print (e.message)
        if len(order1) < 970: # check if the coin has at least 10 days of 15m data
a = symbols.index(x) # get index of x in symbols
k.append(a)
else:
prices.append([]) # add empty list to list of 1 minute
prices1.append([]) # add empty list to list of 15 minutes
for i in range(len(order)):
prices[-1].append(float(order[i][1])) # save 1 minute data
for i in range(len(order1)):
prices1[-1].append(float(order1[i][1])) # save 15 minute data
k.reverse()
for x in k:
symbols.pop(x)
volume.pop(x)
        pozitii.pop(x)
price_change.pop(x)
return symbols, volume, pozitii, prices, prices1,price_change
# Calculate the bid/ask volume ratio from the order book depth stream
def process_depth(msg):
sums5=0
sumb5=0
m=-1
for x in range(5):
if float(msg['data']['bids'][x][1])>m:
m=float(msg['data']['bids'][x][1])
sums5 = sums5 + float(msg['data']['bids'][x][1])
sumb5 = sumb5 + float(msg['data']['asks'][x][1])
ratio1 = sums5 / sumb5
if (ratio1 < 1):
ratio1 = ((1 / ratio1) * -1) + 1
else:
ratio1 -= 1
sums20 = 0
sumb20 = 0
ratio2 = 0
try:
for x in range(17):
sums20 = sums20 + float(msg['data']['bids'][x][1])
sumb20 = sumb20 + float(msg['data']['asks'][x][1])
ratio2 = sums20 / sumb20
if (ratio2 < 1):
ratio2 = ((1 / ratio2) * -1) + 1
else:
ratio2 -= 1
except Exception as e:
print("")
for i in range(len(symbols)):
simbol = symbols[i].lower() + '@depth20'
if simbol == msg['stream']:
ratio5[i] = round(ratio1, 2)
ratio20[i] = round(ratio2, 2)
max_order5[i] = m
ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * 100 / float(volume[i]),2)
current_price[i] = float(msg['data']['bids'][0][0])
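# Illustrative sketch (not part of the original script): the normalization applied to
# ratio1/ratio2 above maps a balanced book (bid volume == ask volume) to 0, a bid-heavy
# book to ratio - 1 > 0 and an ask-heavy book to -(1/ratio - 1) < 0, e.g. 2.0 -> +1.0 and 0.5 -> -1.0.
def _normalized_ratio_example(ratio):
    return ratio - 1 if ratio >= 1 else (1 / ratio) * -1 + 1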
# Refresh 24h quote volume and price change percent from the ticker stream
def process_ticker(msg):
i=0
for x in symbols:
for y in range(len(msg)):
if x == str(msg[y]['s']):
volume[i] = int(float(msg[y]['q']))
price_change[i] = int(float(msg[y]['P']))
i+=1
symbols, volume, pozitii, k_line_1m, k_line_15m, price_change = get_kline()
# Declaring lists necessary for storing data
max_order5=[0 for x in range(len(symbols))]
current_price= [0 for x in range(len(symbols))]
price_chance_2_min = [0 for x in range(len(symbols))]
price_chance_5_min = [0 for x in range(len(symbols))]
price_chance_15_min = [0 for x in range(len(symbols))]
price_chance_30_min = [0 for x in range(len(symbols))]
price_change_25_30_min = [0 for x in range(len(symbols))]
price_chance_1_hour = [0 for x in range(len(symbols))]
price_chance_3_hour = [0 for x in range(len(symbols))]
price_chance_8_hour = [0 for x in range(len(symbols))]
price_change_1_days = [0 for x in range(len(symbols))]
price_change_3_days = [0 for x in range(len(symbols))]
price_change_5_days = [0 for x in range(len(symbols))]
price_change_7_days = [0 for x in range(len(symbols))]
price_change_10_days = [0 for x in range(len(symbols))]
average_10_min = [0 for x in range(len(symbols))]
average_20_min = [0 for x in range(len(symbols))]
average_50_min = [0 for x in range(len(symbols))]
average_100_min = [0 for x in range(len(symbols))]
average_change_10_min = [0 for x in range(len(symbols))]
average_change_20_min = [0 for x in range(len(symbols))]
average_change_50_min = [0 for x in range(len(symbols))]
average_change_100_min = [0 for x in range(len(symbols))]
total_score = [0 for x in range(len(symbols))]
ratio5=[0 for x in range(len(symbols))]
ratio5_10sec=[[] for y in range(len(symbols))]
ratio5_sum = [0 for x in range(len(symbols))]
ratio5_sum_10sec = [[] for y in range(len(symbols))]
ratio20= [0 for x in range(len(symbols))]
# Create the stream name list needed for the depth socket
stream_names = []
for x in symbols:
    stream_names.append(x.lower()+'@depth20') # append @depth20 to each symbol and add it to the stream list
bm = BinanceSocketManager(client)
bm.start()
depth_socket = bm.start_multiplex_socket(stream_names, process_depth) # start depth socket
ticker_socket = bm.start_ticker_socket(process_ticker) # start 24h ticker socket
# maintain candlestick lists
def kline_continuum():
i=0
while True:
time.sleep(60)
for x in range(len(symbols)):
k_line_1m[x].pop(0)
k_line_1m[x].append(current_price[x]) # add price to list of 1 minute candlestick every 1 minute
if i%15==0:
k_line_15m[x].pop(0)
k_line_15m[x].append(current_price[x]) # add price to list of 15 minute candlestick every 15 minute
i+=1
# Keep the last 10 seconds of bid/ask ratio readings for each symbol
def report_10_seconds():
while True:
for x in range(len(symbols)):
if len(ratio5_10sec[x])>10:
ratio5_10sec[x].pop(0)
if len(ratio5_sum_10sec[x]) > 10:
ratio5_sum_10sec[x].pop(0)
ratio5_10sec[x].append(ratio5[x])
ratio5_sum_10sec[x].append(ratio5_sum[x])
time.sleep(1)
# Calculate a score for each symbol; more parameters can be added following the same pattern
def calculate_score():
for x in range(len(symbols)):
score = 0
# 2 minute change parameter score calculation
a = float(price_chance_2_min[x])
if a > 0 and a < 0.5:
score += 1
elif a >= 0.5 and a < 1:
score += 1.25
elif a >= 1 and a < 1.5:
score += 1.5
elif a >= 1.5 and a < 2:
score += 0.5
elif a >= 3:
score += 0.25
# 5 minute change parameter score calculation
a = float(price_chance_5_min[x])
if a > 0 and a < 0.5:
score += 1
elif a >= 0.5 and a < 1:
score += 1.25
elif a >= 1 and a < 2:
score += 1.5
elif a >= 2 and a < 3:
score += 0.5
elif a >= 3:
score += 0.25
# 15 minute change parameter score calculation
a = float(price_chance_15_min[x])
if a <= 1 and a > -0.5:
score += 0.25
elif a <= -0.5 and a > -1:
score += 0.5
elif a <= -1 and a > -1.5:
score += 0.75
elif a <= -1.5:
score += 1
# change between 25 and 30 minutes ago parameter score calculation
a = float(price_change_25_30_min[x])
if a <= 2 and a > -0.75:
score += 0.25
elif a <= -0.75 and a > -1.25:
score += 0.5
elif a <= -1.25 and a > -1.75:
score += 0.75
elif a <= -1.75:
score += 1
# 1 hour change parameter score calculation
a = float(price_chance_1_hour[x])
if a <= 2 and a >= 0:
score += 0.5
elif a <= 0 and a > -2:
score += 0.75
elif a <= -2:
score += 1
# 3 hour change parameter score calculation
a = float(price_chance_3_hour[x])
if a <= 5 and a > -1:
score += 0.25
elif a <= -1 and a > -3:
score += 0.5
elif a <= -3 and a > -6:
score += 0.75
elif a <= -6:
score += 1
# 8 hour change parameter score calculation
a = float(price_chance_8_hour[x])
if a <= 0 and a > -4:
score += 0.25
elif a <= -4 and a > -6:
score += 0.5
elif a <= -6:
score += 0.75
if float(ratio5[x]) > 0:
score += 1
a = 0
for i in range(len(ratio5_10sec[x])):
if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min[x]) > 1:
if float(ratio5_10sec[x][i]) > 0:
a += 1
if float(ratio5_sum_10sec[x][i]) > 0.3:
a += 1
score += a / len(ratio5_sum_10sec[x])
if float(ratio20[x]) > 0:
score += 1
a = 0
for i in range(len(ratio5_10sec[x])-1):
if float(ratio5_10sec[x][i]) > 0:
a += 1
if a <= 2:
score += 0.25
elif a > 2 and a <= 4:
score += 0.5
elif a > 4 and a <= 7:
score += 0.75
elif a > 7:
score += 1
a = 0
for i in range(20, 1, -1):
if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):
a += 1
score += a / 10
# 1 day change parameter score calculation
if float(price_change_1_days[x]) > 5:
score+=0.3
# 3 day change parameter score calculation
if float(price_change_3_days[x]) > 10:
score += 0.25
# 5 day change parameter score calculation
if float(price_change_5_days[x]) > 15:
score += 0.25
# 7 day change parameter score calculation
if float(price_change_7_days[x]) > 20:
score += 0.25
# 10 day change parameter score calculation
if float(price_change_10_days[x]) > -25:
score += 0.25
# 10 minutes moving average parameter score calculation
a=float(average_change_10_min[x])
if a<0.2 and a>-0.3:
score+=0.1
# 20 minutes moving average parameter score calculation
a = float(average_change_20_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# 50 minutes moving average parameter score calculation
a = float(average_change_50_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# 100 minutes moving average parameter score calculation
a = float(average_change_100_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# save score
total_score[x] = score
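# Illustrative sketch (assumption, not part of the original script): another scoring
# parameter can be added inside the loop above with the same pattern, e.g. rewarding
# a positive 30-minute move:
#     a = float(price_chance_30_min[x])
#     if a > 0:
#         score += 0.25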
def print_results():
# sleep time before starting calculations
time.sleep(10)
while True:
for x in range(len(symbols)):
# calculate parameters percentages
try:
price_chance_2_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 2]) - 100, 2)
price_chance_5_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 5]) - 100, 2)
price_chance_15_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 15]) - 100, 2)
price_chance_30_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)
price_chance_1_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 60]) - 100, 2)
price_chance_3_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 180]) - 100, 2)
price_chance_8_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][20]) - 100, 2)
price_change_25_30_min[x] = round(float(k_line_1m[x][- 6]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)
price_change_1_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 96]) - 100, 1)
price_change_3_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 288]) - 100, 1)
price_change_5_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 480] )- 100, 1)
price_change_7_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 672]) - 100, 1)
price_change_10_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 960]) - 100, 1)
average_10_min[x] = round(float(sum(k_line_1m[x][- 10:])) / 10, 8)
average_20_min[x] = round(float(sum(k_line_1m[x][- 20:])) / 20, 8)
average_50_min[x] = round(float(sum(k_line_1m[x][- 50:])) / 50, 8)
average_100_min[x] = round(float(sum(k_line_1m[x][- 100:])) / 100, 8)
average_change_10_min[x] = round(float(current_price[x]) * 100 / float(average_10_min[x]) - 100, 2)
average_change_20_min[x] = round(float(current_price[x]) * 100 / float(average_20_min[x]) - 100, 2)
average_change_50_min[x] = round(float(current_price[x]) * 100 / float(average_50_min[x]) - 100, 2)
average_change_100_min[x] = round(float(current_price[x]) * 100 / float(average_100_min[x]) - 100, 2)
except Exception as e:
print(e)
# call function for score calculation
calculate_score()
# select parameter for which data is sorted
sort_by = total_score
# sort data
sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])
# sort data in reverse order
sorted_data.reverse()
            # Print table header
print (time.ctime())
print ('%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (
'Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch', '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',
'25-30m', 'r5sum', '1d_ch', '3d_ch','5d_ch', '7d_ch', '10d_ch'))
# print top 10 cryptocurrencies data
for k in range(10):
i = sorted_data[k]
print ('%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (
symbols[i][:-3], total_score[i], ratio5[i], ratio20[i], price_chance_2_min[i], price_chance_5_min[i],
price_chance_15_min[i],price_chance_30_min[i], price_chance_1_hour[i], average_change_10_min[i],
average_change_20_min[i],average_change_50_min[i], average_change_100_min[i], price_chance_8_hour[i],
price_change_25_30_min[i], ratio5_sum[i], price_change_1_days[i], price_change_3_days[i],
price_change_5_days[i], price_change_7_days[i], price_change_10_days[i]))
            # If the top-scoring coin's score exceeds 10, play an alert sound
try:
if float(total_score[sorted_data[0]]) > 10:
winsound.PlaySound('\\Sound.wav', winsound.SND_FILENAME)
except Exception as e:
print(e)
# Seconds to wait before repeating while loop
time.sleep(1)
# Declaring threads
threads = [threading.Thread(target=kline_continuum),
threading.Thread(target=report_10_seconds),
threading.Thread(target=print_results)]
# Starting threads
[thread.start() for thread in threads]
[thread.join() for thread in threads]
|
normal
|
{
"blob_id": "dcc85b143f2394b7839f2fb9c2079a7dd9fa8e88",
"index": 4733,
"step-1": "<mask token>\n\n\ndef calculate_data_list():\n counter = 0\n btc = 'BTC'\n symbols = []\n all_positions = []\n positions_final = []\n volume = []\n c = []\n price_change = []\n data = client.get_ticker()\n for x in range(len(data)):\n if btc in data[x]['symbol'] and data[x]['symbol'\n ] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume']) > 100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while len(positions_final) < 20 and len(positions_final) < len(\n all_positions):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]][\n 'priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\ndef get_kline():\n symbols, volume, pozitii, price_change = calculate_data_list()\n prices = []\n prices1 = []\n k = []\n for x in symbols:\n try:\n order = client.get_klines(symbol=x, interval='1m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n try:\n order1 = client.get_klines(symbol=x, limit=1000, interval='15m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n if len(order1) < 970:\n a = symbols.index(x)\n k.append(a)\n else:\n prices.append([])\n prices1.append([])\n for i in range(len(order)):\n prices[-1].append(float(order[i][1]))\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1]))\n k.reverse()\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n return symbols, volume, pozitii, prices, prices1, price_change\n\n\ndef process_depth(msg):\n sums5 = 0\n sumb5 = 0\n m = -1\n for x in range(5):\n if float(msg['data']['bids'][x][1]) > m:\n m = float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if ratio1 < 1:\n ratio1 = 1 / ratio1 * -1 + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if ratio2 < 1:\n ratio2 = 1 / ratio2 * -1 + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print('')\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * \n 100 / float(volume[i]), 2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\n<mask token>\n\n\ndef kline_continuum():\n i = 0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x])\n if i % 15 == 0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x])\n i += 1\n\n\n<mask token>\n\n\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 
2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n score += 0.75\n if float(ratio5[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min\n [x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n if float(ratio20[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x]) - 1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n if float(price_change_1_days[x]) > 5:\n score += 0.3\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n a = float(average_change_10_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n total_score[x] = score\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculate_data_list():\n counter = 0\n btc = 'BTC'\n symbols = []\n all_positions = []\n positions_final = []\n volume = []\n c = []\n price_change = []\n data = client.get_ticker()\n for x in range(len(data)):\n if btc in data[x]['symbol'] and data[x]['symbol'\n ] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume']) > 100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while len(positions_final) < 20 and len(positions_final) < len(\n all_positions):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]][\n 'priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\ndef get_kline():\n symbols, volume, pozitii, price_change = calculate_data_list()\n prices = []\n prices1 = []\n k = []\n for x in symbols:\n try:\n order = client.get_klines(symbol=x, interval='1m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n try:\n order1 = client.get_klines(symbol=x, limit=1000, interval='15m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n if len(order1) < 970:\n a = symbols.index(x)\n k.append(a)\n else:\n prices.append([])\n prices1.append([])\n for i in range(len(order)):\n prices[-1].append(float(order[i][1]))\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1]))\n k.reverse()\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n return symbols, volume, pozitii, prices, prices1, price_change\n\n\ndef process_depth(msg):\n sums5 = 0\n sumb5 = 0\n m = -1\n for x in range(5):\n if float(msg['data']['bids'][x][1]) > m:\n m = float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if ratio1 < 1:\n ratio1 = 1 / ratio1 * -1 + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if ratio2 < 1:\n ratio2 = 1 / ratio2 * -1 + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print('')\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * \n 100 / float(volume[i]), 2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\ndef process_ticker(msg):\n i = 0\n for x in symbols:\n for y in range(len(msg)):\n if x == str(msg[y]['s']):\n volume[i] = int(float(msg[y]['q']))\n price_change[i] = int(float(msg[y]['P']))\n i += 1\n\n\n<mask token>\n\n\ndef kline_continuum():\n i = 0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x])\n if i % 15 == 0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x])\n i += 1\n\n\ndef report_10_seconds():\n while True:\n for x in range(len(symbols)):\n if len(ratio5_10sec[x]) > 10:\n ratio5_10sec[x].pop(0)\n if len(ratio5_sum_10sec[x]) > 10:\n ratio5_sum_10sec[x].pop(0)\n ratio5_10sec[x].append(ratio5[x])\n 
ratio5_sum_10sec[x].append(ratio5_sum[x])\n time.sleep(1)\n\n\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n score += 0.75\n if float(ratio5[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min\n [x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n if float(ratio20[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x]) - 1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n if float(price_change_1_days[x]) > 5:\n score += 0.3\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n a = float(average_change_10_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n total_score[x] = score\n\n\ndef print_results():\n time.sleep(10)\n while True:\n for x in range(len(symbols)):\n try:\n price_chance_2_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-2]) - 100, 2)\n price_chance_5_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-5]) - 100, 2)\n price_chance_15_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-15]) - 100, 2)\n price_chance_30_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_chance_1_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-60]) - 100, 2)\n price_chance_3_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-180]) - 100, 2)\n price_chance_8_hour[x] = 
round(float(current_price[x]) * \n 100 / float(k_line_1m[x][20]) - 100, 2)\n price_change_25_30_min[x] = round(float(k_line_1m[x][-6]) *\n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_change_1_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-96]) - 100, 1)\n price_change_3_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-288]) - 100, 1)\n price_change_5_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-480]) - 100, 1)\n price_change_7_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-672]) - 100, 1)\n price_change_10_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-960]) - 100, 1)\n average_10_min[x] = round(float(sum(k_line_1m[x][-10:])) / \n 10, 8)\n average_20_min[x] = round(float(sum(k_line_1m[x][-20:])) / \n 20, 8)\n average_50_min[x] = round(float(sum(k_line_1m[x][-50:])) / \n 50, 8)\n average_100_min[x] = round(float(sum(k_line_1m[x][-100:])) /\n 100, 8)\n average_change_10_min[x] = round(float(current_price[x]) * \n 100 / float(average_10_min[x]) - 100, 2)\n average_change_20_min[x] = round(float(current_price[x]) * \n 100 / float(average_20_min[x]) - 100, 2)\n average_change_50_min[x] = round(float(current_price[x]) * \n 100 / float(average_50_min[x]) - 100, 2)\n average_change_100_min[x] = round(float(current_price[x]) *\n 100 / float(average_100_min[x]) - 100, 2)\n except Exception as e:\n print(e)\n calculate_score()\n sort_by = total_score\n sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])\n sorted_data.reverse()\n print(time.ctime())\n print(\n '%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % ('Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch',\n '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',\n '25-30m', 'r5sum', '1d_ch', '3d_ch', '5d_ch', '7d_ch', '10d_ch'))\n for k in range(10):\n i = sorted_data[k]\n print(\n '%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % (symbols[i][:-3], total_score[i], ratio5[i], ratio20[i],\n price_chance_2_min[i], price_chance_5_min[i],\n price_chance_15_min[i], price_chance_30_min[i],\n price_chance_1_hour[i], average_change_10_min[i],\n average_change_20_min[i], average_change_50_min[i],\n average_change_100_min[i], price_chance_8_hour[i],\n price_change_25_30_min[i], ratio5_sum[i],\n price_change_1_days[i], price_change_3_days[i],\n price_change_5_days[i], price_change_7_days[i],\n price_change_10_days[i]))\n try:\n if float(total_score[sorted_data[0]]) > 10:\n winsound.PlaySound('\\\\Sound.wav', winsound.SND_FILENAME)\n except Exception as e:\n print(e)\n time.sleep(1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calculate_data_list():\n counter = 0\n btc = 'BTC'\n symbols = []\n all_positions = []\n positions_final = []\n volume = []\n c = []\n price_change = []\n data = client.get_ticker()\n for x in range(len(data)):\n if btc in data[x]['symbol'] and data[x]['symbol'\n ] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume']) > 100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while len(positions_final) < 20 and len(positions_final) < len(\n all_positions):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]][\n 'priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\ndef get_kline():\n symbols, volume, pozitii, price_change = calculate_data_list()\n prices = []\n prices1 = []\n k = []\n for x in symbols:\n try:\n order = client.get_klines(symbol=x, interval='1m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n try:\n order1 = client.get_klines(symbol=x, limit=1000, interval='15m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n if len(order1) < 970:\n a = symbols.index(x)\n k.append(a)\n else:\n prices.append([])\n prices1.append([])\n for i in range(len(order)):\n prices[-1].append(float(order[i][1]))\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1]))\n k.reverse()\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n return symbols, volume, pozitii, prices, prices1, price_change\n\n\ndef process_depth(msg):\n sums5 = 0\n sumb5 = 0\n m = -1\n for x in range(5):\n if float(msg['data']['bids'][x][1]) > m:\n m = float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if ratio1 < 1:\n ratio1 = 1 / ratio1 * -1 + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if ratio2 < 1:\n ratio2 = 1 / ratio2 * -1 + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print('')\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * \n 100 / float(volume[i]), 2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\ndef process_ticker(msg):\n i = 0\n for x in symbols:\n for y in range(len(msg)):\n if x == str(msg[y]['s']):\n volume[i] = int(float(msg[y]['q']))\n price_change[i] = int(float(msg[y]['P']))\n i += 1\n\n\n<mask token>\nfor x in symbols:\n list.append(x.lower() + '@depth20')\n<mask token>\nbm.start()\n<mask token>\n\n\ndef kline_continuum():\n i = 0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x])\n if i % 15 == 0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x])\n i += 1\n\n\ndef report_10_seconds():\n while True:\n for x in range(len(symbols)):\n if len(ratio5_10sec[x]) > 10:\n ratio5_10sec[x].pop(0)\n if 
len(ratio5_sum_10sec[x]) > 10:\n ratio5_sum_10sec[x].pop(0)\n ratio5_10sec[x].append(ratio5[x])\n ratio5_sum_10sec[x].append(ratio5_sum[x])\n time.sleep(1)\n\n\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n score += 0.75\n if float(ratio5[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min\n [x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n if float(ratio20[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x]) - 1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n if float(price_change_1_days[x]) > 5:\n score += 0.3\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n a = float(average_change_10_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n total_score[x] = score\n\n\ndef print_results():\n time.sleep(10)\n while True:\n for x in range(len(symbols)):\n try:\n price_chance_2_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-2]) - 100, 2)\n price_chance_5_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-5]) - 100, 2)\n price_chance_15_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-15]) - 100, 2)\n price_chance_30_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_chance_1_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-60]) - 100, 2)\n price_chance_3_hour[x] = 
round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-180]) - 100, 2)\n price_chance_8_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][20]) - 100, 2)\n price_change_25_30_min[x] = round(float(k_line_1m[x][-6]) *\n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_change_1_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-96]) - 100, 1)\n price_change_3_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-288]) - 100, 1)\n price_change_5_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-480]) - 100, 1)\n price_change_7_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-672]) - 100, 1)\n price_change_10_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-960]) - 100, 1)\n average_10_min[x] = round(float(sum(k_line_1m[x][-10:])) / \n 10, 8)\n average_20_min[x] = round(float(sum(k_line_1m[x][-20:])) / \n 20, 8)\n average_50_min[x] = round(float(sum(k_line_1m[x][-50:])) / \n 50, 8)\n average_100_min[x] = round(float(sum(k_line_1m[x][-100:])) /\n 100, 8)\n average_change_10_min[x] = round(float(current_price[x]) * \n 100 / float(average_10_min[x]) - 100, 2)\n average_change_20_min[x] = round(float(current_price[x]) * \n 100 / float(average_20_min[x]) - 100, 2)\n average_change_50_min[x] = round(float(current_price[x]) * \n 100 / float(average_50_min[x]) - 100, 2)\n average_change_100_min[x] = round(float(current_price[x]) *\n 100 / float(average_100_min[x]) - 100, 2)\n except Exception as e:\n print(e)\n calculate_score()\n sort_by = total_score\n sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])\n sorted_data.reverse()\n print(time.ctime())\n print(\n '%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % ('Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch',\n '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',\n '25-30m', 'r5sum', '1d_ch', '3d_ch', '5d_ch', '7d_ch', '10d_ch'))\n for k in range(10):\n i = sorted_data[k]\n print(\n '%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % (symbols[i][:-3], total_score[i], ratio5[i], ratio20[i],\n price_chance_2_min[i], price_chance_5_min[i],\n price_chance_15_min[i], price_chance_30_min[i],\n price_chance_1_hour[i], average_change_10_min[i],\n average_change_20_min[i], average_change_50_min[i],\n average_change_100_min[i], price_chance_8_hour[i],\n price_change_25_30_min[i], ratio5_sum[i],\n price_change_1_days[i], price_change_3_days[i],\n price_change_5_days[i], price_change_7_days[i],\n price_change_10_days[i]))\n try:\n if float(total_score[sorted_data[0]]) > 10:\n winsound.PlaySound('\\\\Sound.wav', winsound.SND_FILENAME)\n except Exception as e:\n print(e)\n time.sleep(1)\n\n\n<mask token>\n[thread.start() for thread in threads]\n[thread.join() for thread in threads]\n",
"step-4": "from binance.client import Client\nfrom binance.websockets import BinanceSocketManager\nfrom binance.enums import *\nimport time\nimport threading\nimport winsound\nclient = Client(your_api_key, your_api_secret)\n\n\ndef calculate_data_list():\n counter = 0\n btc = 'BTC'\n symbols = []\n all_positions = []\n positions_final = []\n volume = []\n c = []\n price_change = []\n data = client.get_ticker()\n for x in range(len(data)):\n if btc in data[x]['symbol'] and data[x]['symbol'\n ] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume']) > 100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while len(positions_final) < 20 and len(positions_final) < len(\n all_positions):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]][\n 'priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\ndef get_kline():\n symbols, volume, pozitii, price_change = calculate_data_list()\n prices = []\n prices1 = []\n k = []\n for x in symbols:\n try:\n order = client.get_klines(symbol=x, interval='1m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n try:\n order1 = client.get_klines(symbol=x, limit=1000, interval='15m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n if len(order1) < 970:\n a = symbols.index(x)\n k.append(a)\n else:\n prices.append([])\n prices1.append([])\n for i in range(len(order)):\n prices[-1].append(float(order[i][1]))\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1]))\n k.reverse()\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n return symbols, volume, pozitii, prices, prices1, price_change\n\n\ndef process_depth(msg):\n sums5 = 0\n sumb5 = 0\n m = -1\n for x in range(5):\n if float(msg['data']['bids'][x][1]) > m:\n m = float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if ratio1 < 1:\n ratio1 = 1 / ratio1 * -1 + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if ratio2 < 1:\n ratio2 = 1 / ratio2 * -1 + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print('')\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * \n 100 / float(volume[i]), 2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\ndef process_ticker(msg):\n i = 0\n for x in symbols:\n for y in range(len(msg)):\n if x == str(msg[y]['s']):\n volume[i] = int(float(msg[y]['q']))\n price_change[i] = int(float(msg[y]['P']))\n i += 1\n\n\nsymbols, volume, pozitii, k_line_1m, k_line_15m, price_change = get_kline()\nmax_order5 = [(0) for x in range(len(symbols))]\ncurrent_price = [(0) for x in range(len(symbols))]\nprice_chance_2_min = [(0) for x in range(len(symbols))]\nprice_chance_5_min = [(0) for x in range(len(symbols))]\nprice_chance_15_min = [(0) 
for x in range(len(symbols))]\nprice_chance_30_min = [(0) for x in range(len(symbols))]\nprice_change_25_30_min = [(0) for x in range(len(symbols))]\nprice_chance_1_hour = [(0) for x in range(len(symbols))]\nprice_chance_3_hour = [(0) for x in range(len(symbols))]\nprice_chance_8_hour = [(0) for x in range(len(symbols))]\nprice_change_1_days = [(0) for x in range(len(symbols))]\nprice_change_3_days = [(0) for x in range(len(symbols))]\nprice_change_5_days = [(0) for x in range(len(symbols))]\nprice_change_7_days = [(0) for x in range(len(symbols))]\nprice_change_10_days = [(0) for x in range(len(symbols))]\naverage_10_min = [(0) for x in range(len(symbols))]\naverage_20_min = [(0) for x in range(len(symbols))]\naverage_50_min = [(0) for x in range(len(symbols))]\naverage_100_min = [(0) for x in range(len(symbols))]\naverage_change_10_min = [(0) for x in range(len(symbols))]\naverage_change_20_min = [(0) for x in range(len(symbols))]\naverage_change_50_min = [(0) for x in range(len(symbols))]\naverage_change_100_min = [(0) for x in range(len(symbols))]\ntotal_score = [(0) for x in range(len(symbols))]\nratio5 = [(0) for x in range(len(symbols))]\nratio5_10sec = [[] for y in range(len(symbols))]\nratio5_sum = [(0) for x in range(len(symbols))]\nratio5_sum_10sec = [[] for y in range(len(symbols))]\nratio20 = [(0) for x in range(len(symbols))]\nlist = []\nfor x in symbols:\n list.append(x.lower() + '@depth20')\nbm = BinanceSocketManager(client)\nbm.start()\ndepth_socket = bm.start_multiplex_socket(list, process_depth)\nticker_socket = bm.start_ticker_socket(process_ticker)\n\n\ndef kline_continuum():\n i = 0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x])\n if i % 15 == 0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x])\n i += 1\n\n\ndef report_10_seconds():\n while True:\n for x in range(len(symbols)):\n if len(ratio5_10sec[x]) > 10:\n ratio5_10sec[x].pop(0)\n if len(ratio5_sum_10sec[x]) > 10:\n ratio5_sum_10sec[x].pop(0)\n ratio5_10sec[x].append(ratio5[x])\n ratio5_sum_10sec[x].append(ratio5_sum[x])\n time.sleep(1)\n\n\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n 
score += 0.75\n if float(ratio5[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min\n [x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n if float(ratio20[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x]) - 1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n if float(price_change_1_days[x]) > 5:\n score += 0.3\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n a = float(average_change_10_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n total_score[x] = score\n\n\ndef print_results():\n time.sleep(10)\n while True:\n for x in range(len(symbols)):\n try:\n price_chance_2_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-2]) - 100, 2)\n price_chance_5_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-5]) - 100, 2)\n price_chance_15_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-15]) - 100, 2)\n price_chance_30_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_chance_1_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-60]) - 100, 2)\n price_chance_3_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-180]) - 100, 2)\n price_chance_8_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][20]) - 100, 2)\n price_change_25_30_min[x] = round(float(k_line_1m[x][-6]) *\n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_change_1_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-96]) - 100, 1)\n price_change_3_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-288]) - 100, 1)\n price_change_5_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-480]) - 100, 1)\n price_change_7_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-672]) - 100, 1)\n price_change_10_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-960]) - 100, 1)\n average_10_min[x] = round(float(sum(k_line_1m[x][-10:])) / \n 10, 8)\n average_20_min[x] = round(float(sum(k_line_1m[x][-20:])) / \n 20, 8)\n average_50_min[x] = round(float(sum(k_line_1m[x][-50:])) / \n 50, 8)\n average_100_min[x] = round(float(sum(k_line_1m[x][-100:])) /\n 100, 8)\n average_change_10_min[x] = round(float(current_price[x]) * \n 100 / float(average_10_min[x]) - 100, 2)\n average_change_20_min[x] = round(float(current_price[x]) * \n 100 / float(average_20_min[x]) - 100, 2)\n average_change_50_min[x] = round(float(current_price[x]) * \n 100 / float(average_50_min[x]) - 100, 2)\n average_change_100_min[x] = round(float(current_price[x]) *\n 100 / float(average_100_min[x]) - 100, 2)\n except Exception as e:\n print(e)\n 
calculate_score()\n sort_by = total_score\n sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])\n sorted_data.reverse()\n print(time.ctime())\n print(\n '%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % ('Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch',\n '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',\n '25-30m', 'r5sum', '1d_ch', '3d_ch', '5d_ch', '7d_ch', '10d_ch'))\n for k in range(10):\n i = sorted_data[k]\n print(\n '%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % (symbols[i][:-3], total_score[i], ratio5[i], ratio20[i],\n price_chance_2_min[i], price_chance_5_min[i],\n price_chance_15_min[i], price_chance_30_min[i],\n price_chance_1_hour[i], average_change_10_min[i],\n average_change_20_min[i], average_change_50_min[i],\n average_change_100_min[i], price_chance_8_hour[i],\n price_change_25_30_min[i], ratio5_sum[i],\n price_change_1_days[i], price_change_3_days[i],\n price_change_5_days[i], price_change_7_days[i],\n price_change_10_days[i]))\n try:\n if float(total_score[sorted_data[0]]) > 10:\n winsound.PlaySound('\\\\Sound.wav', winsound.SND_FILENAME)\n except Exception as e:\n print(e)\n time.sleep(1)\n\n\nthreads = [threading.Thread(target=kline_continuum), threading.Thread(\n target=report_10_seconds), threading.Thread(target=print_results)]\n[thread.start() for thread in threads]\n[thread.join() for thread in threads]\n",
"step-5": "from binance.client import Client\nfrom binance.websockets import BinanceSocketManager\nfrom binance.enums import *\nimport time\nimport threading\nimport winsound\n\n# Replace your_api_key, your_api_secret with your api_key, api_secret\nclient = Client(your_api_key, your_api_secret)\n\n\n# Calculate list of symbols\ndef calculate_data_list():\n counter=0\n btc='BTC'\n symbols=[]\n all_positions=[]\n positions_final=[]\n volume=[]\n c=[]\n price_change = []\n data=client.get_ticker()\n for x in range(len(data)):\n if (btc in data[x]['symbol']) and data[x]['symbol'] != 'BTCUSDT'and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume'])>100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while (len(positions_final) < 20 and len(positions_final) < len(all_positions)):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]]['priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\n# Get candlestick data from Binance\ndef get_kline():\n symbols, volume, pozitii,price_change = calculate_data_list()\n prices = []\n prices1 = []\n k=[]\n\n for x in symbols:\n try:\n order = client.get_klines( # Get 1 minute candlestick data from server\n symbol=x,\n interval='1m')\n except BinanceAPIException as e:\n print (e.status_code)\n print (e.message)\n try:\n order1 = client.get_klines( # Get 15 minute candlestick data from server\n symbol=x,\n limit= 1000,\n interval='15m')\n except BinanceAPIException as e:\n print (e.status_code)\n print (e.message)\n\n if len(order1) < 970: # check if coin have at least 10 days of data\n a = symbols.index(x) # get index of x in symbols\n k.append(a)\n else:\n prices.append([]) # add empty list to list of 1 minute\n prices1.append([]) # add empty list to list of 15 minutes\n for i in range(len(order)):\n prices[-1].append(float(order[i][1])) # save 1 minute data\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1])) # save 15 minute data\n k.reverse()\n\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n\n return symbols, volume, pozitii, prices, prices1,price_change\n# Calculate report between bid and ask offers\ndef process_depth(msg):\n sums5=0\n sumb5=0\n m=-1\n for x in range(5):\n if float(msg['data']['bids'][x][1])>m:\n m=float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if (ratio1 < 1):\n ratio1 = ((1 / ratio1) * -1) + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if (ratio2 < 1):\n ratio2 = ((1 / ratio2) * -1) + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print(\"\")\n\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * 100 / float(volume[i]),2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\n# Refresh price and volume to current price and volume\ndef 
process_ticker(msg):\n i=0\n for x in symbols:\n for y in range(len(msg)):\n if x == str(msg[y]['s']):\n volume[i] = int(float(msg[y]['q']))\n price_change[i] = int(float(msg[y]['P']))\n i+=1\n\nsymbols,volume,pozitii,k_line_1m,k_line_15m,price_change =get_kline()\n\n\n# Declaring lists necessary for storing data\nmax_order5=[0 for x in range(len(symbols))]\ncurrent_price= [0 for x in range(len(symbols))]\nprice_chance_2_min = [0 for x in range(len(symbols))]\nprice_chance_5_min = [0 for x in range(len(symbols))]\nprice_chance_15_min = [0 for x in range(len(symbols))]\nprice_chance_30_min = [0 for x in range(len(symbols))]\nprice_change_25_30_min = [0 for x in range(len(symbols))]\nprice_chance_1_hour = [0 for x in range(len(symbols))]\nprice_chance_3_hour = [0 for x in range(len(symbols))]\nprice_chance_8_hour = [0 for x in range(len(symbols))]\nprice_change_1_days = [0 for x in range(len(symbols))]\nprice_change_3_days = [0 for x in range(len(symbols))]\nprice_change_5_days = [0 for x in range(len(symbols))]\nprice_change_7_days = [0 for x in range(len(symbols))]\nprice_change_10_days = [0 for x in range(len(symbols))]\naverage_10_min = [0 for x in range(len(symbols))]\naverage_20_min = [0 for x in range(len(symbols))]\naverage_50_min = [0 for x in range(len(symbols))]\naverage_100_min = [0 for x in range(len(symbols))]\naverage_change_10_min = [0 for x in range(len(symbols))]\naverage_change_20_min = [0 for x in range(len(symbols))]\naverage_change_50_min = [0 for x in range(len(symbols))]\naverage_change_100_min = [0 for x in range(len(symbols))]\ntotal_score = [0 for x in range(len(symbols))]\nratio5=[0 for x in range(len(symbols))]\nratio5_10sec=[[] for y in range(len(symbols))]\nratio5_sum = [0 for x in range(len(symbols))]\nratio5_sum_10sec = [[] for y in range(len(symbols))]\nratio20= [0 for x in range(len(symbols))]\n\n# Create list neccessary for depth socked\nlist=[]\nfor x in symbols:\n list.append(x.lower()+'@depth20') # append @depth20 to each symbol and add it into list\n\nbm = BinanceSocketManager(client)\nbm.start()\ndepth_socket = bm.start_multiplex_socket(list,process_depth) # start depth socket\nticker_socket = bm.start_ticker_socket(process_ticker) # start price socket\n\n# maintain candlestick lists\ndef kline_continuum():\n i=0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x]) # add price to list of 1 minute candlestick every 1 minute\n if i%15==0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x]) # add price to list of 15 minute candlestick every 15 minute\n i+=1\n\n\n# Save report between ask and bit for the last 10 seconds\ndef report_10_seconds():\n while True:\n for x in range(len(symbols)):\n if len(ratio5_10sec[x])>10:\n ratio5_10sec[x].pop(0)\n if len(ratio5_sum_10sec[x]) > 10:\n ratio5_sum_10sec[x].pop(0)\n ratio5_10sec[x].append(ratio5[x])\n ratio5_sum_10sec[x].append(ratio5_sum[x])\n time.sleep(1)\n\n\n# Calculate score for each symbol, you can add as many parameters as you want\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n\n # 2 minute change parameter score calculation\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n\n # 5 minute change parameter score calculation\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n 
score += 1.25\n elif a >= 1 and a < 2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n\n # 15 minute change parameter score calculation\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n\n # change between 25 and 30 minutes ago parameter score calculation\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n\n # 1 hour change parameter score calculation\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n\n # 3 hour change parameter score calculation\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n\n # 8 hour change parameter score calculation\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n score += 0.75\n\n\n\n if float(ratio5[x]) > 0:\n score += 1\n\n\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min[x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n\n\n if float(ratio20[x]) > 0:\n score += 1\n\n a = 0\n for i in range(len(ratio5_10sec[x])-1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n\n # 1 day change parameter score calculation\n if float(price_change_1_days[x]) > 5:\n score+=0.3\n # 3 day change parameter score calculation\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n # 5 day change parameter score calculation\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n # 7 day change parameter score calculation\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n # 10 day change parameter score calculation\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n\n # 10 minutes moving average parameter score calculation\n a=float(average_change_10_min[x])\n if a<0.2 and a>-0.3:\n score+=0.1\n # 20 minutes moving average parameter score calculation\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n # 50 minutes moving average parameter score calculation\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n # 100 minutes moving average parameter score calculation\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n\n # save score\n total_score[x] = score\n\n\ndef print_results():\n # sleep time before starting calculations\n time.sleep(10)\n\n while True:\n for x in range(len(symbols)):\n # calculate parameters percentages\n try:\n price_chance_2_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 2]) - 100, 2)\n price_chance_5_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 5]) - 100, 2)\n price_chance_15_min[x] = round(float(current_price[x]) * 100 / 
float(k_line_1m[x][- 15]) - 100, 2)\n price_chance_30_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)\n price_chance_1_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 60]) - 100, 2)\n price_chance_3_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 180]) - 100, 2)\n price_chance_8_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][20]) - 100, 2)\n price_change_25_30_min[x] = round(float(k_line_1m[x][- 6]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)\n price_change_1_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 96]) - 100, 1)\n price_change_3_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 288]) - 100, 1)\n price_change_5_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 480] )- 100, 1)\n price_change_7_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 672]) - 100, 1)\n price_change_10_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 960]) - 100, 1)\n average_10_min[x] = round(float(sum(k_line_1m[x][- 10:])) / 10, 8)\n average_20_min[x] = round(float(sum(k_line_1m[x][- 20:])) / 20, 8)\n average_50_min[x] = round(float(sum(k_line_1m[x][- 50:])) / 50, 8)\n average_100_min[x] = round(float(sum(k_line_1m[x][- 100:])) / 100, 8)\n average_change_10_min[x] = round(float(current_price[x]) * 100 / float(average_10_min[x]) - 100, 2)\n average_change_20_min[x] = round(float(current_price[x]) * 100 / float(average_20_min[x]) - 100, 2)\n average_change_50_min[x] = round(float(current_price[x]) * 100 / float(average_50_min[x]) - 100, 2)\n average_change_100_min[x] = round(float(current_price[x]) * 100 / float(average_100_min[x]) - 100, 2)\n except Exception as e:\n print(e)\n\n\n # call function for score calculation\n calculate_score()\n\n # select parameter for which data is sorted\n sort_by = total_score\n\n # sort data\n sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])\n # sort data in reverse order\n sorted_data.reverse()\n\n #print table header\n print (time.ctime())\n print ('%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (\n 'Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch', '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',\n '25-30m', 'r5sum', '1d_ch', '3d_ch','5d_ch', '7d_ch', '10d_ch'))\n\n # print top 10 cryptocurrencies data\n for k in range(10):\n i = sorted_data[k]\n print ('%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (\n symbols[i][:-3], total_score[i], ratio5[i], ratio20[i], price_chance_2_min[i], price_chance_5_min[i],\n price_chance_15_min[i],price_chance_30_min[i], price_chance_1_hour[i], average_change_10_min[i],\n average_change_20_min[i],average_change_50_min[i], average_change_100_min[i], price_chance_8_hour[i],\n price_change_25_30_min[i], ratio5_sum[i], price_change_1_days[i], price_change_3_days[i],\n price_change_5_days[i], price_change_7_days[i], price_change_10_days[i]))\n\n # if score for one coin is > 10 will play sound\n try:\n if float(total_score[sorted_data[0]]) > 10:\n winsound.PlaySound('\\\\Sound.wav', winsound.SND_FILENAME)\n except Exception as e:\n print(e)\n\n # Seconds to wait before repeating while loop\n time.sleep(1)\n\n# Declaring threads\nthreads = [threading.Thread(target=kline_continuum),\n threading.Thread(target=report_10_seconds),\n threading.Thread(target=print_results)]\n# Starting threads\n[thread.start() for thread in 
threads]\n[thread.join() for thread in threads]\n\n\n",
"step-ids": [
5,
8,
9,
11,
12
]
}
|
[
5,
8,
9,
11,
12
] |
api_key = "your_key"
|
normal
|
{
"blob_id": "f024b0736f5fcdebede8d5b0985cf9d7170db8fc",
"index": 7401,
"step-1": "<mask token>\n",
"step-2": "api_key = 'your_key'\n",
"step-3": "api_key = \"your_key\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('devisa', '0021_auto_20190110_1256')]
operations = [migrations.RemoveField(model_name='entidade', name=
'bairro'), migrations.RemoveField(model_name='entidade', name=
'ent_cep'), migrations.RemoveField(model_name='entidade', name=
'ent_cnes'), migrations.RemoveField(model_name='entidade', name=
'ent_complemento'), migrations.RemoveField(model_name='entidade',
name='ent_dt_expedicao'), migrations.RemoveField(model_name=
'entidade', name='ent_dt_inicio_func'), migrations.RemoveField(
model_name='entidade', name='ent_email'), migrations.RemoveField(
model_name='entidade', name='ent_endereco'), migrations.RemoveField
(model_name='entidade', name='ent_especializacao'), migrations.
RemoveField(model_name='entidade', name='ent_fantasia'), migrations
.RemoveField(model_name='entidade', name='ent_fax'), migrations.
RemoveField(model_name='entidade', name='ent_fone'), migrations.
RemoveField(model_name='entidade', name='ent_insc_estadual'),
migrations.RemoveField(model_name='entidade', name=
'ent_insc_municipal'), migrations.RemoveField(model_name='entidade',
name='ent_numero'), migrations.RemoveField(model_name='entidade',
name='ent_obj_contrato_social'), migrations.RemoveField(model_name=
'entidade', name='ent_observacoes'), migrations.RemoveField(
model_name='entidade', name='ent_orgao_exp'), migrations.
RemoveField(model_name='entidade', name='ent_pasta_num'),
migrations.RemoveField(model_name='entidade', name=
'ent_registro_conselho'), migrations.RemoveField(model_name=
'entidade', name='ent_rg'), migrations.RemoveField(model_name=
'entidade', name='escolaridade'), migrations.RemoveField(model_name
='entidade', name='formacao_profissional'), migrations.RemoveField(
model_name='entidade', name='municipio'), migrations.RemoveField(
model_name='entidade', name='natureza_juridica_dependencia')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('devisa', '0021_auto_20190110_1256')]
operations = [migrations.RemoveField(model_name='entidade', name=
'bairro'), migrations.RemoveField(model_name='entidade', name=
'ent_cep'), migrations.RemoveField(model_name='entidade', name=
'ent_cnes'), migrations.RemoveField(model_name='entidade', name=
'ent_complemento'), migrations.RemoveField(model_name='entidade',
name='ent_dt_expedicao'), migrations.RemoveField(model_name=
'entidade', name='ent_dt_inicio_func'), migrations.RemoveField(
model_name='entidade', name='ent_email'), migrations.RemoveField(
model_name='entidade', name='ent_endereco'), migrations.RemoveField
(model_name='entidade', name='ent_especializacao'), migrations.
RemoveField(model_name='entidade', name='ent_fantasia'), migrations
.RemoveField(model_name='entidade', name='ent_fax'), migrations.
RemoveField(model_name='entidade', name='ent_fone'), migrations.
RemoveField(model_name='entidade', name='ent_insc_estadual'),
migrations.RemoveField(model_name='entidade', name=
'ent_insc_municipal'), migrations.RemoveField(model_name='entidade',
name='ent_numero'), migrations.RemoveField(model_name='entidade',
name='ent_obj_contrato_social'), migrations.RemoveField(model_name=
'entidade', name='ent_observacoes'), migrations.RemoveField(
model_name='entidade', name='ent_orgao_exp'), migrations.
RemoveField(model_name='entidade', name='ent_pasta_num'),
migrations.RemoveField(model_name='entidade', name=
'ent_registro_conselho'), migrations.RemoveField(model_name=
'entidade', name='ent_rg'), migrations.RemoveField(model_name=
'entidade', name='escolaridade'), migrations.RemoveField(model_name
='entidade', name='formacao_profissional'), migrations.RemoveField(
model_name='entidade', name='municipio'), migrations.RemoveField(
model_name='entidade', name='natureza_juridica_dependencia')]
<|reserved_special_token_1|>
# Generated by Django 2.1.4 on 2019-01-11 11:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('devisa', '0021_auto_20190110_1256'),
]
operations = [
migrations.RemoveField(
model_name='entidade',
name='bairro',
),
migrations.RemoveField(
model_name='entidade',
name='ent_cep',
),
migrations.RemoveField(
model_name='entidade',
name='ent_cnes',
),
migrations.RemoveField(
model_name='entidade',
name='ent_complemento',
),
migrations.RemoveField(
model_name='entidade',
name='ent_dt_expedicao',
),
migrations.RemoveField(
model_name='entidade',
name='ent_dt_inicio_func',
),
migrations.RemoveField(
model_name='entidade',
name='ent_email',
),
migrations.RemoveField(
model_name='entidade',
name='ent_endereco',
),
migrations.RemoveField(
model_name='entidade',
name='ent_especializacao',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fantasia',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fax',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fone',
),
migrations.RemoveField(
model_name='entidade',
name='ent_insc_estadual',
),
migrations.RemoveField(
model_name='entidade',
name='ent_insc_municipal',
),
migrations.RemoveField(
model_name='entidade',
name='ent_numero',
),
migrations.RemoveField(
model_name='entidade',
name='ent_obj_contrato_social',
),
migrations.RemoveField(
model_name='entidade',
name='ent_observacoes',
),
migrations.RemoveField(
model_name='entidade',
name='ent_orgao_exp',
),
migrations.RemoveField(
model_name='entidade',
name='ent_pasta_num',
),
migrations.RemoveField(
model_name='entidade',
name='ent_registro_conselho',
),
migrations.RemoveField(
model_name='entidade',
name='ent_rg',
),
migrations.RemoveField(
model_name='entidade',
name='escolaridade',
),
migrations.RemoveField(
model_name='entidade',
name='formacao_profissional',
),
migrations.RemoveField(
model_name='entidade',
name='municipio',
),
migrations.RemoveField(
model_name='entidade',
name='natureza_juridica_dependencia',
),
]
|
flexible
|
{
"blob_id": "34f79fa3de68b53f19220697815e5bae5270d056",
"index": 9274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('devisa', '0021_auto_20190110_1256')]\n operations = [migrations.RemoveField(model_name='entidade', name=\n 'bairro'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cep'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cnes'), migrations.RemoveField(model_name='entidade', name=\n 'ent_complemento'), migrations.RemoveField(model_name='entidade',\n name='ent_dt_expedicao'), migrations.RemoveField(model_name=\n 'entidade', name='ent_dt_inicio_func'), migrations.RemoveField(\n model_name='entidade', name='ent_email'), migrations.RemoveField(\n model_name='entidade', name='ent_endereco'), migrations.RemoveField\n (model_name='entidade', name='ent_especializacao'), migrations.\n RemoveField(model_name='entidade', name='ent_fantasia'), migrations\n .RemoveField(model_name='entidade', name='ent_fax'), migrations.\n RemoveField(model_name='entidade', name='ent_fone'), migrations.\n RemoveField(model_name='entidade', name='ent_insc_estadual'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_insc_municipal'), migrations.RemoveField(model_name='entidade',\n name='ent_numero'), migrations.RemoveField(model_name='entidade',\n name='ent_obj_contrato_social'), migrations.RemoveField(model_name=\n 'entidade', name='ent_observacoes'), migrations.RemoveField(\n model_name='entidade', name='ent_orgao_exp'), migrations.\n RemoveField(model_name='entidade', name='ent_pasta_num'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_registro_conselho'), migrations.RemoveField(model_name=\n 'entidade', name='ent_rg'), migrations.RemoveField(model_name=\n 'entidade', name='escolaridade'), migrations.RemoveField(model_name\n ='entidade', name='formacao_profissional'), migrations.RemoveField(\n model_name='entidade', name='municipio'), migrations.RemoveField(\n model_name='entidade', name='natureza_juridica_dependencia')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('devisa', '0021_auto_20190110_1256')]\n operations = [migrations.RemoveField(model_name='entidade', name=\n 'bairro'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cep'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cnes'), migrations.RemoveField(model_name='entidade', name=\n 'ent_complemento'), migrations.RemoveField(model_name='entidade',\n name='ent_dt_expedicao'), migrations.RemoveField(model_name=\n 'entidade', name='ent_dt_inicio_func'), migrations.RemoveField(\n model_name='entidade', name='ent_email'), migrations.RemoveField(\n model_name='entidade', name='ent_endereco'), migrations.RemoveField\n (model_name='entidade', name='ent_especializacao'), migrations.\n RemoveField(model_name='entidade', name='ent_fantasia'), migrations\n .RemoveField(model_name='entidade', name='ent_fax'), migrations.\n RemoveField(model_name='entidade', name='ent_fone'), migrations.\n RemoveField(model_name='entidade', name='ent_insc_estadual'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_insc_municipal'), migrations.RemoveField(model_name='entidade',\n name='ent_numero'), migrations.RemoveField(model_name='entidade',\n name='ent_obj_contrato_social'), migrations.RemoveField(model_name=\n 'entidade', name='ent_observacoes'), migrations.RemoveField(\n model_name='entidade', name='ent_orgao_exp'), migrations.\n RemoveField(model_name='entidade', name='ent_pasta_num'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_registro_conselho'), migrations.RemoveField(model_name=\n 'entidade', name='ent_rg'), migrations.RemoveField(model_name=\n 'entidade', name='escolaridade'), migrations.RemoveField(model_name\n ='entidade', name='formacao_profissional'), migrations.RemoveField(\n model_name='entidade', name='municipio'), migrations.RemoveField(\n model_name='entidade', name='natureza_juridica_dependencia')]\n",
"step-5": "# Generated by Django 2.1.4 on 2019-01-11 11:58\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('devisa', '0021_auto_20190110_1256'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='entidade',\n name='bairro',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_cep',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_cnes',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_complemento',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_dt_expedicao',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_dt_inicio_func',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_email',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_endereco',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_especializacao',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fantasia',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fax',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fone',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_insc_estadual',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_insc_municipal',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_numero',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_obj_contrato_social',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_observacoes',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_orgao_exp',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_pasta_num',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_registro_conselho',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_rg',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='escolaridade',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='formacao_profissional',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='municipio',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='natureza_juridica_dependencia',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
def find_packages():
return ['sqlpython']
classifiers = """Development Status :: 4 - Beta
Intended Audience :: Information Technology
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: SQL
Topic :: Database :: Front-Ends
Operating System :: OS Independent""".splitlines()
setup(name="sqlpython",
version="1.7.3",
description="Command-line interface to Oracle",
long_description="Customizable alternative to Oracle's SQL*PLUS command-line interface",
author="Luca Canali",
author_email="luca.canali@cern.ch",
url="http://packages.python.org/sqlpython",
packages=find_packages(),
include_package_data=True,
install_requires=['pyparsing','cmd2==0.6.3','gerald>=0.4.1.1',
'genshi==0.6'],
extras_require = {
'oracle': ['cx_Oracle==6.1'],
'postgres': ['psycopg2'],
},
keywords = 'client oracle database',
license = 'MIT',
platforms = ['any'],
entry_points = """
[console_scripts]
sqlpython = sqlpython.mysqlpy:run
editplot_sqlpython = sqlpython.editplot.bash"""
)
|
normal
|
{
"blob_id": "f960c95afe1f7a161e0144bb523bfaca117ae61e",
"index": 2260,
"step-1": "<mask token>\n",
"step-2": "try:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n\n def find_packages():\n return ['sqlpython']\n<mask token>\nsetup(name='sqlpython', version='1.7.3', description=\n 'Command-line interface to Oracle', long_description=\n \"Customizable alternative to Oracle's SQL*PLUS command-line interface\",\n author='Luca Canali', author_email='luca.canali@cern.ch', url=\n 'http://packages.python.org/sqlpython', packages=find_packages(),\n include_package_data=True, install_requires=['pyparsing', 'cmd2==0.6.3',\n 'gerald>=0.4.1.1', 'genshi==0.6'], extras_require={'oracle': [\n 'cx_Oracle==6.1'], 'postgres': ['psycopg2']}, keywords=\n 'client oracle database', license='MIT', platforms=['any'],\n entry_points=\n \"\"\"\n [console_scripts]\n sqlpython = sqlpython.mysqlpy:run\n editplot_sqlpython = sqlpython.editplot.bash\"\"\"\n )\n",
"step-3": "try:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n\n def find_packages():\n return ['sqlpython']\nclassifiers = (\n \"\"\"Development Status :: 4 - Beta\nIntended Audience :: Information Technology\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: SQL\nTopic :: Database :: Front-Ends\nOperating System :: OS Independent\"\"\"\n .splitlines())\nsetup(name='sqlpython', version='1.7.3', description=\n 'Command-line interface to Oracle', long_description=\n \"Customizable alternative to Oracle's SQL*PLUS command-line interface\",\n author='Luca Canali', author_email='luca.canali@cern.ch', url=\n 'http://packages.python.org/sqlpython', packages=find_packages(),\n include_package_data=True, install_requires=['pyparsing', 'cmd2==0.6.3',\n 'gerald>=0.4.1.1', 'genshi==0.6'], extras_require={'oracle': [\n 'cx_Oracle==6.1'], 'postgres': ['psycopg2']}, keywords=\n 'client oracle database', license='MIT', platforms=['any'],\n entry_points=\n \"\"\"\n [console_scripts]\n sqlpython = sqlpython.mysqlpy:run\n editplot_sqlpython = sqlpython.editplot.bash\"\"\"\n )\n",
"step-4": "try:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n def find_packages():\n return ['sqlpython']\n \nclassifiers = \"\"\"Development Status :: 4 - Beta\nIntended Audience :: Information Technology\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: SQL\nTopic :: Database :: Front-Ends\nOperating System :: OS Independent\"\"\".splitlines()\n\nsetup(name=\"sqlpython\",\n version=\"1.7.3\",\n description=\"Command-line interface to Oracle\",\n long_description=\"Customizable alternative to Oracle's SQL*PLUS command-line interface\",\n author=\"Luca Canali\",\n author_email=\"luca.canali@cern.ch\",\n url=\"http://packages.python.org/sqlpython\",\n packages=find_packages(),\n include_package_data=True, \n install_requires=['pyparsing','cmd2==0.6.3','gerald>=0.4.1.1',\n 'genshi==0.6'],\n extras_require = {\n 'oracle': ['cx_Oracle==6.1'],\n 'postgres': ['psycopg2'],\n },\n keywords = 'client oracle database',\n license = 'MIT',\n platforms = ['any'],\n entry_points = \"\"\"\n [console_scripts]\n sqlpython = sqlpython.mysqlpy:run\n editplot_sqlpython = sqlpython.editplot.bash\"\"\" \n )\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from django.shortcuts import render_to_response
from django.template import RequestContext
from whydjango.casestudies.forms import SubmitCaseStudyForm
def case_study_submission(request, template_name="casestudies/submit.html"):
form = SubmitCaseStudyForm(request.POST or None)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("submit_message"))
return render_to_response(template_name, {
"form": form,
}, context_instance=RequestContext(request))
|
normal
|
{
"blob_id": "fe3e104cf213b21c33a4b5c6e1a61315c4770eda",
"index": 6821,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef case_study_submission(request, template_name='casestudies/submit.html'):\n form = SubmitCaseStudyForm(request.POST or None)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('submit_message'))\n return render_to_response(template_name, {'form': form},\n context_instance=RequestContext(request))\n",
"step-3": "from django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom whydjango.casestudies.forms import SubmitCaseStudyForm\n\n\ndef case_study_submission(request, template_name='casestudies/submit.html'):\n form = SubmitCaseStudyForm(request.POST or None)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('submit_message'))\n return render_to_response(template_name, {'form': form},\n context_instance=RequestContext(request))\n",
"step-4": "from django.core.urlresolvers import reverse \nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext \n\n\nfrom whydjango.casestudies.forms import SubmitCaseStudyForm\n\ndef case_study_submission(request, template_name=\"casestudies/submit.html\"):\n\n form = SubmitCaseStudyForm(request.POST or None)\n\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse(\"submit_message\"))\n\n return render_to_response(template_name, { \n \"form\": form,\n }, context_instance=RequestContext(request)) \n \n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This program is run at regular intervals to check the battery charge status of the uninterruptible power supply.
In our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the
Raspberry PI shutdown procedure at 3.7 V, we ensure that the processor has enough time to make a clean shutdown.
This program must be launched at regular intervals (5 minutes in our case) by the Raspberry PI OS cron task scheduler.
The crontab -e command in the home directory opens the cron file; the command line for a trigger every 5 minutes would for example be:
*/5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py
"""
import time
import datetime as dt
from subprocess import call
from pidcmes_lib import Pidcmes # class for 'pidcmes' procedures
pidcmes = Pidcmes()  # initialize the Pidcmes class
u_bat_min = 3.7  # minimum battery voltage
n_moy = 20 # averaging to reduce glitches
stop_run = False # to control the execution (run/stop)
u_avg = pidcmes.get_tension(n_moy) # read the value in volts
if u_avg < u_bat_min:# or i > 10:
print("proper shut down of the machine due to low battery")
# time.sleep(5)
# call("sudo shutdown -h now", shell=True) # shutdown the RASPI
else:
print("tout va bien dormez braves gens")
|
normal
|
{
"blob_id": "67b967b688aeac1270eee836e0f6e6b3555b933e",
"index": 5,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n",
"step-3": "<mask token>\npidcmes = Pidcmes()\nu_bat_min = 3.7\nn_moy = 20\nstop_run = False\nu_avg = pidcmes.get_tension(n_moy)\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n",
"step-4": "<mask token>\nimport time\nimport datetime as dt\nfrom subprocess import call\nfrom pidcmes_lib import Pidcmes\npidcmes = Pidcmes()\nu_bat_min = 3.7\nn_moy = 20\nstop_run = False\nu_avg = pidcmes.get_tension(n_moy)\nif u_avg < u_bat_min:\n print('proper shut down of the machine due to low battery')\nelse:\n print('tout va bien dormez braves gens')\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis program is run at regular intervals to check the battery charge status of the uninterruptible power supply.\nIn our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the\nRaspberry PI shutdown procedure at 3.7 V,we ensure that the processor has enough time to make a clean shutdown.\n\nThis program must be launched at regular intervals (5 inute in our case) by the Raspberry PI OS cron task scheduler.\nThe crontab -e command in the home directory opens the cron file and the command line would for example be for a trigger every 5 minutes:\n5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py\n\"\"\"\n\nimport time\nimport datetime as dt\n\nfrom subprocess import call\nfrom pidcmes_lib import Pidcmes # class for 'pidcmes' procedures\n \npidcmes = Pidcmes() # initialize pidcmese class\n\nu_bat_min = 3.7 # minumum battery voltage \nn_moy = 20 # averaging to reduce glitches\nstop_run = False # to control the execution (run/stop)\n\nu_avg = pidcmes.get_tension(n_moy) # read the value in volts\n\n \nif u_avg < u_bat_min:# or i > 10: \n print(\"proper shut down of the machine due to low battery\")\n# time.sleep(5)\n# call(\"sudo shutdown -h now\", shell=True) # shutdown the RASPI\nelse:\n print(\"tout va bien dormez braves gens\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class StateConverters:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DGUSScreen(Entity):
def __init__(self, hass, screen):
self._state = None
self._hass = hass
self._name = screen['name']
self._state_track_settings = {entry['entity_id']: entry for entry in
screen.get('show_states', [])}
try:
self._protocol = create_protocol(screen['port_name'], screen[
'bound_rate'], self.on_data)
except Exception as er:
_LOGGER.error("Can't open serial port %s, : %s", screen[
'port_name'], str(er))
entiti_ids = [entry['entity_id'] for entry in screen['show_states']]
async_track_state_change(hass, entiti_ids, self.state_listener)
def state_listener(self, entity, old_state, new_state):
settings = self._state_track_settings[entity]
if settings['type'] == 'int':
StateConverters.send_int(new_state, settings, self._protocol.
protocol)
elif settings['type'] == 'map':
StateConverters.send_map(new_state, settings, self._protocol.
protocol)
@property
def name(self):
return self._name
@property
def state(self):
return self._state
def on_data(self, vp, value):
"""fire event for data, received from screen"""
eventName = self.name + '_set_vp'
self._hass.bus.fire(eventName, {'vp': vp, 'value': value})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StateConverters:
@staticmethod
def extract_attr(state, attr):
if attr:
return state.attributes[attr]
else:
return state.as_dict()['state']
@staticmethod
def send_int(state, settings, protocol):
vp = settings['vp']
attr = settings.get('attribute', None)
try:
value = int(float(StateConverters.extract_attr(state, attr)))
protocol.write_vp(vp, value)
except Exception as er:
_LOGGER.error("Can't send value: %s", str(er))
@staticmethod
def send_map(state, settings, protocol):
vp = settings['vp']
map_state = settings['map']
attr = settings.get('attribute', None)
key = str(StateConverters.extract_attr(state, attr))
value = int(map_state[key])
protocol.write_vp(vp, value)
class DGUSScreen(Entity):
def __init__(self, hass, screen):
self._state = None
self._hass = hass
self._name = screen['name']
self._state_track_settings = {entry['entity_id']: entry for entry in
screen.get('show_states', [])}
try:
self._protocol = create_protocol(screen['port_name'], screen[
'bound_rate'], self.on_data)
except Exception as er:
_LOGGER.error("Can't open serial port %s, : %s", screen[
'port_name'], str(er))
entiti_ids = [entry['entity_id'] for entry in screen['show_states']]
async_track_state_change(hass, entiti_ids, self.state_listener)
def state_listener(self, entity, old_state, new_state):
settings = self._state_track_settings[entity]
if settings['type'] == 'int':
StateConverters.send_int(new_state, settings, self._protocol.
protocol)
elif settings['type'] == 'map':
StateConverters.send_map(new_state, settings, self._protocol.
protocol)
@property
def name(self):
return self._name
@property
def state(self):
return self._state
def on_data(self, vp, value):
"""fire event for data, received from screen"""
eventName = self.name + '_set_vp'
self._hass.bus.fire(eventName, {'vp': vp, 'value': value})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
async_add_entities: Callable, discovery_info: Optional[
DiscoveryInfoType]=None) ->None:
sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]
async_add_entities(sensors, update_before_add=True)
class StateConverters:
@staticmethod
def extract_attr(state, attr):
if attr:
return state.attributes[attr]
else:
return state.as_dict()['state']
@staticmethod
def send_int(state, settings, protocol):
vp = settings['vp']
attr = settings.get('attribute', None)
try:
value = int(float(StateConverters.extract_attr(state, attr)))
protocol.write_vp(vp, value)
except Exception as er:
_LOGGER.error("Can't send value: %s", str(er))
@staticmethod
def send_map(state, settings, protocol):
vp = settings['vp']
map_state = settings['map']
attr = settings.get('attribute', None)
key = str(StateConverters.extract_attr(state, attr))
value = int(map_state[key])
protocol.write_vp(vp, value)
class DGUSScreen(Entity):
def __init__(self, hass, screen):
self._state = None
self._hass = hass
self._name = screen['name']
self._state_track_settings = {entry['entity_id']: entry for entry in
screen.get('show_states', [])}
try:
self._protocol = create_protocol(screen['port_name'], screen[
'bound_rate'], self.on_data)
except Exception as er:
_LOGGER.error("Can't open serial port %s, : %s", screen[
'port_name'], str(er))
entiti_ids = [entry['entity_id'] for entry in screen['show_states']]
async_track_state_change(hass, entiti_ids, self.state_listener)
def state_listener(self, entity, old_state, new_state):
settings = self._state_track_settings[entity]
if settings['type'] == 'int':
StateConverters.send_int(new_state, settings, self._protocol.
protocol)
elif settings['type'] == 'map':
StateConverters.send_map(new_state, settings, self._protocol.
protocol)
@property
def name(self):
return self._name
@property
def state(self):
return self._state
def on_data(self, vp, value):
"""fire event for data, received from screen"""
eventName = self.name + '_set_vp'
self._hass.bus.fire(eventName, {'vp': vp, 'value': value})
<|reserved_special_token_1|>
import logging
from .const import DOMAIN, CONF_SCREENS
from typing import Any, Callable, Dict, Optional
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType, HomeAssistantType
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change
from .dgus_protocol import create_protocol
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
async_add_entities: Callable, discovery_info: Optional[
DiscoveryInfoType]=None) ->None:
sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]
async_add_entities(sensors, update_before_add=True)
class StateConverters:
@staticmethod
def extract_attr(state, attr):
if attr:
return state.attributes[attr]
else:
return state.as_dict()['state']
@staticmethod
def send_int(state, settings, protocol):
vp = settings['vp']
attr = settings.get('attribute', None)
try:
value = int(float(StateConverters.extract_attr(state, attr)))
protocol.write_vp(vp, value)
except Exception as er:
_LOGGER.error("Can't send value: %s", str(er))
@staticmethod
def send_map(state, settings, protocol):
vp = settings['vp']
map_state = settings['map']
attr = settings.get('attribute', None)
key = str(StateConverters.extract_attr(state, attr))
value = int(map_state[key])
protocol.write_vp(vp, value)
class DGUSScreen(Entity):
def __init__(self, hass, screen):
self._state = None
self._hass = hass
self._name = screen['name']
self._state_track_settings = {entry['entity_id']: entry for entry in
screen.get('show_states', [])}
try:
self._protocol = create_protocol(screen['port_name'], screen[
'bound_rate'], self.on_data)
except Exception as er:
_LOGGER.error("Can't open serial port %s, : %s", screen[
'port_name'], str(er))
entiti_ids = [entry['entity_id'] for entry in screen['show_states']]
async_track_state_change(hass, entiti_ids, self.state_listener)
def state_listener(self, entity, old_state, new_state):
settings = self._state_track_settings[entity]
if settings['type'] == 'int':
StateConverters.send_int(new_state, settings, self._protocol.
protocol)
elif settings['type'] == 'map':
StateConverters.send_map(new_state, settings, self._protocol.
protocol)
@property
def name(self):
return self._name
@property
def state(self):
return self._state
def on_data(self, vp, value):
"""fire event for data, received from screen"""
eventName = self.name + '_set_vp'
self._hass.bus.fire(eventName, {'vp': vp, 'value': value})
<|reserved_special_token_1|>
import logging
from .const import (
DOMAIN,
CONF_SCREENS
)
from typing import Any, Callable, Dict, Optional
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import (
ConfigType,
DiscoveryInfoType,
HomeAssistantType,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change
from .dgus_protocol import create_protocol
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistantType,
config: ConfigType,
async_add_entities: Callable,
discovery_info: Optional[DiscoveryInfoType] = None,
) -> None:
sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]
async_add_entities(sensors, update_before_add=True)
class StateConverters:
@staticmethod
def extract_attr(state, attr):
if attr:
return state.attributes[attr]
else:
return state.as_dict()['state']
@staticmethod
def send_int(state, settings, protocol):
vp = settings['vp']
attr = settings.get('attribute', None)
try:
value = int(float(StateConverters.extract_attr(state, attr)))
protocol.write_vp(vp, value)
except Exception as er:
_LOGGER.error("Can't send value: %s", str(er))
@staticmethod
def send_map(state, settings, protocol):
vp = settings['vp']
map_state = settings['map']
attr = settings.get('attribute', None)
key = str(StateConverters.extract_attr(state, attr))
value = int(map_state[key])
protocol.write_vp(vp, value)
class DGUSScreen(Entity):
def __init__(self, hass, screen):
self._state = None
self._hass = hass
self._name = screen['name']
self._state_track_settings = {
entry['entity_id']: entry for entry in screen.get('show_states', [])}
try:
self._protocol = create_protocol(
screen['port_name'], screen['bound_rate'], self.on_data)
except Exception as er:
_LOGGER.error("Can't open serial port %s, : %s",
screen['port_name'], str(er))
entiti_ids = [entry['entity_id'] for entry in screen['show_states']]
async_track_state_change(hass, entiti_ids, self.state_listener)
def state_listener(self, entity, old_state, new_state):
settings = self._state_track_settings[entity]
if settings['type'] == 'int':
StateConverters.send_int(
new_state, settings, self._protocol.protocol)
elif settings['type'] == 'map':
StateConverters.send_map(
new_state, settings, self._protocol.protocol)
@property
def name(self):
return self._name
@property
def state(self):
return self._state
def on_data(self, vp, value):
"""fire event for data, received from screen"""
eventName = self.name + "_set_vp"
self._hass.bus.fire(eventName, {"vp": vp, "value": value})
|
flexible
|
{
"blob_id": "6f1b08a5ae1a07a30d89f3997461f4f97658f364",
"index": 4920,
"step-1": "<mask token>\n\n\nclass StateConverters:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DGUSScreen(Entity):\n\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {entry['entity_id']: entry for entry in\n screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(screen['port_name'], screen[\n 'bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\", screen[\n 'port_name'], str(er))\n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(new_state, settings, self._protocol.\n protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(new_state, settings, self._protocol.\n protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + '_set_vp'\n self._hass.bus.fire(eventName, {'vp': vp, 'value': value})\n",
"step-2": "<mask token>\n\n\nclass StateConverters:\n\n @staticmethod\n def extract_attr(state, attr):\n if attr:\n return state.attributes[attr]\n else:\n return state.as_dict()['state']\n\n @staticmethod\n def send_int(state, settings, protocol):\n vp = settings['vp']\n attr = settings.get('attribute', None)\n try:\n value = int(float(StateConverters.extract_attr(state, attr)))\n protocol.write_vp(vp, value)\n except Exception as er:\n _LOGGER.error(\"Can't send value: %s\", str(er))\n\n @staticmethod\n def send_map(state, settings, protocol):\n vp = settings['vp']\n map_state = settings['map']\n attr = settings.get('attribute', None)\n key = str(StateConverters.extract_attr(state, attr))\n value = int(map_state[key])\n protocol.write_vp(vp, value)\n\n\nclass DGUSScreen(Entity):\n\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {entry['entity_id']: entry for entry in\n screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(screen['port_name'], screen[\n 'bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\", screen[\n 'port_name'], str(er))\n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(new_state, settings, self._protocol.\n protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(new_state, settings, self._protocol.\n protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + '_set_vp'\n self._hass.bus.fire(eventName, {'vp': vp, 'value': value})\n",
"step-3": "<mask token>\n\n\nasync def async_setup_platform(hass: HomeAssistantType, config: ConfigType,\n async_add_entities: Callable, discovery_info: Optional[\n DiscoveryInfoType]=None) ->None:\n sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]\n async_add_entities(sensors, update_before_add=True)\n\n\nclass StateConverters:\n\n @staticmethod\n def extract_attr(state, attr):\n if attr:\n return state.attributes[attr]\n else:\n return state.as_dict()['state']\n\n @staticmethod\n def send_int(state, settings, protocol):\n vp = settings['vp']\n attr = settings.get('attribute', None)\n try:\n value = int(float(StateConverters.extract_attr(state, attr)))\n protocol.write_vp(vp, value)\n except Exception as er:\n _LOGGER.error(\"Can't send value: %s\", str(er))\n\n @staticmethod\n def send_map(state, settings, protocol):\n vp = settings['vp']\n map_state = settings['map']\n attr = settings.get('attribute', None)\n key = str(StateConverters.extract_attr(state, attr))\n value = int(map_state[key])\n protocol.write_vp(vp, value)\n\n\nclass DGUSScreen(Entity):\n\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {entry['entity_id']: entry for entry in\n screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(screen['port_name'], screen[\n 'bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\", screen[\n 'port_name'], str(er))\n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(new_state, settings, self._protocol.\n protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(new_state, settings, self._protocol.\n protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + '_set_vp'\n self._hass.bus.fire(eventName, {'vp': vp, 'value': value})\n",
"step-4": "import logging\nfrom .const import DOMAIN, CONF_SCREENS\nfrom typing import Any, Callable, Dict, Optional\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType, HomeAssistantType\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.event import async_track_state_change\nfrom .dgus_protocol import create_protocol\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(hass: HomeAssistantType, config: ConfigType,\n async_add_entities: Callable, discovery_info: Optional[\n DiscoveryInfoType]=None) ->None:\n sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]\n async_add_entities(sensors, update_before_add=True)\n\n\nclass StateConverters:\n\n @staticmethod\n def extract_attr(state, attr):\n if attr:\n return state.attributes[attr]\n else:\n return state.as_dict()['state']\n\n @staticmethod\n def send_int(state, settings, protocol):\n vp = settings['vp']\n attr = settings.get('attribute', None)\n try:\n value = int(float(StateConverters.extract_attr(state, attr)))\n protocol.write_vp(vp, value)\n except Exception as er:\n _LOGGER.error(\"Can't send value: %s\", str(er))\n\n @staticmethod\n def send_map(state, settings, protocol):\n vp = settings['vp']\n map_state = settings['map']\n attr = settings.get('attribute', None)\n key = str(StateConverters.extract_attr(state, attr))\n value = int(map_state[key])\n protocol.write_vp(vp, value)\n\n\nclass DGUSScreen(Entity):\n\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {entry['entity_id']: entry for entry in\n screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(screen['port_name'], screen[\n 'bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\", screen[\n 'port_name'], str(er))\n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(new_state, settings, self._protocol.\n protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(new_state, settings, self._protocol.\n protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + '_set_vp'\n self._hass.bus.fire(eventName, {'vp': vp, 'value': value})\n",
"step-5": "import logging\nfrom .const import (\n DOMAIN,\n CONF_SCREENS\n)\nfrom typing import Any, Callable, Dict, Optional\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.typing import (\n ConfigType,\n DiscoveryInfoType,\n HomeAssistantType,\n)\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.event import async_track_state_change\nfrom .dgus_protocol import create_protocol\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(\n hass: HomeAssistantType,\n config: ConfigType,\n async_add_entities: Callable,\n discovery_info: Optional[DiscoveryInfoType] = None,\n) -> None:\n sensors = [DGUSScreen(hass, screen) for screen in config[CONF_SCREENS]]\n async_add_entities(sensors, update_before_add=True)\n\n\nclass StateConverters:\n @staticmethod\n def extract_attr(state, attr):\n if attr:\n return state.attributes[attr]\n else:\n return state.as_dict()['state']\n\n @staticmethod\n def send_int(state, settings, protocol):\n vp = settings['vp']\n attr = settings.get('attribute', None)\n try:\n value = int(float(StateConverters.extract_attr(state, attr)))\n protocol.write_vp(vp, value)\n except Exception as er:\n _LOGGER.error(\"Can't send value: %s\", str(er))\n\n @staticmethod\n def send_map(state, settings, protocol):\n vp = settings['vp']\n map_state = settings['map']\n attr = settings.get('attribute', None)\n key = str(StateConverters.extract_attr(state, attr))\n value = int(map_state[key])\n protocol.write_vp(vp, value)\n\n\nclass DGUSScreen(Entity):\n def __init__(self, hass, screen):\n self._state = None\n self._hass = hass\n self._name = screen['name']\n self._state_track_settings = {\n entry['entity_id']: entry for entry in screen.get('show_states', [])}\n try:\n self._protocol = create_protocol(\n screen['port_name'], screen['bound_rate'], self.on_data)\n except Exception as er:\n _LOGGER.error(\"Can't open serial port %s, : %s\",\n screen['port_name'], str(er))\n \n entiti_ids = [entry['entity_id'] for entry in screen['show_states']]\n async_track_state_change(hass, entiti_ids, self.state_listener)\n\n def state_listener(self, entity, old_state, new_state):\n settings = self._state_track_settings[entity]\n if settings['type'] == 'int':\n StateConverters.send_int(\n new_state, settings, self._protocol.protocol)\n elif settings['type'] == 'map':\n StateConverters.send_map(\n new_state, settings, self._protocol.protocol)\n\n @property\n def name(self):\n return self._name\n\n @property\n def state(self):\n return self._state\n\n def on_data(self, vp, value):\n \"\"\"fire event for data, received from screen\"\"\"\n eventName = self.name + \"_set_vp\"\n self._hass.bus.fire(eventName, {\"vp\": vp, \"value\": value})\n",
"step-ids": [
7,
10,
11,
13,
14
]
}
|
[
7,
10,
11,
13,
14
] |
from django.shortcuts import render, redirect
# Create your views here.
from item.models import Item, Unit
def str_to_bool(s):
return True if s.lower() == 'true' else False
def item(request):
if not request.session.get('is_login', None):
return redirect('/item/item')
else:
item_list = Item.objects.all()
return render(request, 'item/item.html', locals())
def add_item(request):
if request.method == 'GET':
last_item_info = Item.objects.last()
unit_list=Unit.objects.all()
return render(request, 'item/add_item.html', locals())
else:
item_index = request.POST.get('item_index')
item_chinese_name = request.POST.get('item_chinese_name')
item_english_name = request.POST.get('item_english_name')
item_method = request.POST.get('item_method')
item_unit = request.POST.get('item_unit')
is_calc = request.POST.get('is_calc')
is_use = request.POST.get('is_use')
unit_info=Unit.objects.get(id=item_unit)
new_item = Item(item_index=int(item_index), item_chinese_name=item_chinese_name,
item_english_name=item_english_name,item_method=item_method,item_unit=unit_info,is_calc=str_to_bool(is_calc),
is_use=str_to_bool(is_use))
new_item.save()
return redirect('/item/item/')
def edit_item(request):
if request.method == 'GET':
nid = request.GET.get('nid')
item_info = Item.objects.get(id=nid)
unit_list = Unit.objects.all()
return render(request, 'item/edit_item.html', locals())
else:
nid = request.GET.get('nid')
item_index = request.POST.get('item_index')
item_chinese_name = request.POST.get('item_chinese_name')
item_english_name = request.POST.get('item_english_name')
item_method = request.POST.get('item_method')
item_unit = request.POST.get('item_unit')
is_calc = request.POST.get('is_calc')
is_use = request.POST.get('is_use')
unit_info = Unit.objects.get(id=item_unit)
item_info = Item.objects.get(id=nid)
item_info.item_index = item_index
item_info.item_chinese_name = item_chinese_name
item_info.item_english_name = item_english_name
item_info.item_method = item_method
item_info.item_unit = unit_info
item_info.is_calc = str_to_bool(is_calc)
item_info.is_use = str_to_bool(is_use)
item_info.save()
return redirect('/item/item/')
def del_item(request):
nid = request.GET.get('nid')
    item_info = Item.objects.filter(id=nid)
item_info.delete()
return redirect('/item/item/')
def unit(request):
if not request.session.get('is_login', None):
return redirect('/item/unit')
else:
unit_list = Unit.objects.all()
return render(request, 'item/unit.html', locals())
def add_unit(request):
if request.method == 'GET':
last_unit_info = Unit.objects.last()
return render(request, 'item/add_unit.html', locals())
else:
unit_index = request.POST.get('unit_index')
unit_name = request.POST.get('unit_name')
new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name,)
new_unit.save()
return redirect('/item/unit/')
def edit_unit(request):
if request.method == 'GET':
nid = request.GET.get('nid')
unit_info = Unit.objects.get(id=nid)
return render(request, 'item/edit_unit.html', locals())
else:
nid = request.GET.get('nid')
unit_index = request.POST.get('unit_index')
unit_name = request.POST.get('unit_name')
unit_info = Unit.objects.get(id=nid)
unit_info.unit_index = unit_index
unit_info.unit_name = unit_name
unit_info.save()
return redirect('/item/unit/')
def del_unit(request):
nid = request.GET.get('nid')
unit_info = Unit.objects.filter(id=nid)
unit_info.delete()
return redirect('/item/unit/')
|
normal
|
{
"blob_id": "22b2ebdbb48caa593bece030d238089a0aa27053",
"index": 1983,
"step-1": "<mask token>\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\n<mask token>\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')\n",
"step-2": "<mask token>\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\n<mask token>\n\n\ndef edit_item(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n item_info = Item.objects.get(id=nid)\n unit_list = Unit.objects.all()\n return render(request, 'item/edit_item.html', locals())\n else:\n nid = request.GET.get('nid')\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n unit_info = Unit.objects.get(id=item_unit)\n item_info = Item.objects.get(id=nid)\n item_info.item_index = item_index\n item_info.item_chinese_name = item_chinese_name\n item_info.item_english_name = item_english_name\n item_info.item_method = item_method\n item_info.item_unit = unit_info\n item_info.is_calc = str_to_bool(is_calc)\n item_info.is_use = str_to_bool(is_use)\n item_info.save()\n return redirect('/item/item/')\n\n\n<mask token>\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')\n",
"step-3": "<mask token>\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\n<mask token>\n\n\ndef edit_item(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n item_info = Item.objects.get(id=nid)\n unit_list = Unit.objects.all()\n return render(request, 'item/edit_item.html', locals())\n else:\n nid = request.GET.get('nid')\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n unit_info = Unit.objects.get(id=item_unit)\n item_info = Item.objects.get(id=nid)\n item_info.item_index = item_index\n item_info.item_chinese_name = item_chinese_name\n item_info.item_english_name = item_english_name\n item_info.item_method = item_method\n item_info.item_unit = unit_info\n item_info.is_calc = str_to_bool(is_calc)\n item_info.is_use = str_to_bool(is_use)\n item_info.save()\n return redirect('/item/item/')\n\n\n<mask token>\n\n\ndef unit(request):\n if not request.session.get('is_login', None):\n return redirect('/item/unit')\n else:\n unit_list = Unit.objects.all()\n return render(request, 'item/unit.html', locals())\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')\n",
"step-4": "<mask token>\n\n\ndef str_to_bool(s):\n return True if s.lower() == 'true' else False\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\ndef add_item(request):\n if request.method == 'GET':\n last_item_info = Item.objects.last()\n unit_list = Unit.objects.all()\n return render(request, 'item/add_item.html', locals())\n else:\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n unit_info = Unit.objects.get(id=item_unit)\n new_item = Item(item_index=int(item_index), item_chinese_name=\n item_chinese_name, item_english_name=item_english_name,\n item_method=item_method, item_unit=unit_info, is_calc=\n str_to_bool(is_calc), is_use=str_to_bool(is_use))\n new_item.save()\n return redirect('/item/item/')\n\n\ndef edit_item(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n item_info = Item.objects.get(id=nid)\n unit_list = Unit.objects.all()\n return render(request, 'item/edit_item.html', locals())\n else:\n nid = request.GET.get('nid')\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n unit_info = Unit.objects.get(id=item_unit)\n item_info = Item.objects.get(id=nid)\n item_info.item_index = item_index\n item_info.item_chinese_name = item_chinese_name\n item_info.item_english_name = item_english_name\n item_info.item_method = item_method\n item_info.item_unit = unit_info\n item_info.is_calc = str_to_bool(is_calc)\n item_info.is_use = str_to_bool(is_use)\n item_info.save()\n return redirect('/item/item/')\n\n\n<mask token>\n\n\ndef unit(request):\n if not request.session.get('is_login', None):\n return redirect('/item/unit')\n else:\n unit_list = Unit.objects.all()\n return render(request, 'item/unit.html', locals())\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')\n",
"step-5": "from django.shortcuts import render, redirect\n\n\n# Create your views here.\nfrom item.models import Item, Unit\n\n\ndef str_to_bool(s):\n return True if s.lower() == 'true' else False\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\ndef add_item(request):\n if request.method == 'GET':\n last_item_info = Item.objects.last()\n unit_list=Unit.objects.all()\n return render(request, 'item/add_item.html', locals())\n else:\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n\n unit_info=Unit.objects.get(id=item_unit)\n new_item = Item(item_index=int(item_index), item_chinese_name=item_chinese_name,\n item_english_name=item_english_name,item_method=item_method,item_unit=unit_info,is_calc=str_to_bool(is_calc),\n is_use=str_to_bool(is_use))\n new_item.save()\n return redirect('/item/item/')\n\n\ndef edit_item(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n item_info = Item.objects.get(id=nid)\n unit_list = Unit.objects.all()\n return render(request, 'item/edit_item.html', locals())\n else:\n nid = request.GET.get('nid')\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n\n unit_info = Unit.objects.get(id=item_unit)\n item_info = Item.objects.get(id=nid)\n item_info.item_index = item_index\n item_info.item_chinese_name = item_chinese_name\n item_info.item_english_name = item_english_name\n item_info.item_method = item_method\n item_info.item_unit = unit_info\n item_info.is_calc = str_to_bool(is_calc)\n\n item_info.is_use = str_to_bool(is_use)\n item_info.save()\n return redirect('/item/item/')\n\n\ndef del_item(request):\n nid = request.GET.get('nid')\n item_info = Unit.objects.filter(id=nid)\n item_info.delete()\n return redirect('/item/item/')\n\n\ndef unit(request):\n if not request.session.get('is_login', None):\n return redirect('/item/unit')\n else:\n unit_list = Unit.objects.all()\n return render(request, 'item/unit.html', locals())\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name,)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = 
Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')",
"step-ids": [
4,
5,
6,
8,
11
]
}
|
[
4,
5,
6,
8,
11
] |
from enum import Enum
class CellState(Enum):
EMPTY = 1
DEAD = 2
ALIVE = 3
WAS_ALIVE = 4
def __str__(self):
default_str = super(CellState, self).__str__()
if default_str == "CellState.EMPTY":
return "E"
elif default_str == "CellState.DEAD":
return "D"
elif default_str == "CellState.ALIVE":
return "A"
elif default_str == "CellState.WAS_ALIVE":
return "W"
else:
return "?"
|
normal
|
{
"blob_id": "29bee4ef11281380aa05d22ef54cb76502ecd685",
"index": 466,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CellState(Enum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n default_str = super(CellState, self).__str__()\n if default_str == 'CellState.EMPTY':\n return 'E'\n elif default_str == 'CellState.DEAD':\n return 'D'\n elif default_str == 'CellState.ALIVE':\n return 'A'\n elif default_str == 'CellState.WAS_ALIVE':\n return 'W'\n else:\n return '?'\n",
"step-3": "<mask token>\n\n\nclass CellState(Enum):\n EMPTY = 1\n DEAD = 2\n ALIVE = 3\n WAS_ALIVE = 4\n\n def __str__(self):\n default_str = super(CellState, self).__str__()\n if default_str == 'CellState.EMPTY':\n return 'E'\n elif default_str == 'CellState.DEAD':\n return 'D'\n elif default_str == 'CellState.ALIVE':\n return 'A'\n elif default_str == 'CellState.WAS_ALIVE':\n return 'W'\n else:\n return '?'\n",
"step-4": "from enum import Enum\n\n\nclass CellState(Enum):\n EMPTY = 1\n DEAD = 2\n ALIVE = 3\n WAS_ALIVE = 4\n\n def __str__(self):\n default_str = super(CellState, self).__str__()\n if default_str == 'CellState.EMPTY':\n return 'E'\n elif default_str == 'CellState.DEAD':\n return 'D'\n elif default_str == 'CellState.ALIVE':\n return 'A'\n elif default_str == 'CellState.WAS_ALIVE':\n return 'W'\n else:\n return '?'\n",
"step-5": "from enum import Enum\n\nclass CellState(Enum):\n EMPTY = 1\n DEAD = 2\n ALIVE = 3\n WAS_ALIVE = 4\n\n def __str__(self):\n default_str = super(CellState, self).__str__()\n if default_str == \"CellState.EMPTY\":\n return \"E\"\n elif default_str == \"CellState.DEAD\":\n return \"D\"\n elif default_str == \"CellState.ALIVE\":\n return \"A\"\n elif default_str == \"CellState.WAS_ALIVE\":\n return \"W\"\n else:\n return \"?\"\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def mean_std(loader):
mean = 0
std = 0
for images, _ in loader:
batch_samples = images.size(0)
images = images.view(batch_samples, images.size(1), -1)
mean += images.mean(2).sum(0)
std += images.std(2).sum(0)
mean /= len(loader.dataset)
std /= len(loader.dataset)
return mean, std
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Total dataset images: ', len(dataset))
<|reserved_special_token_0|>
def mean_std(loader):
mean = 0
std = 0
for images, _ in loader:
batch_samples = images.size(0)
images = images.view(batch_samples, images.size(1), -1)
mean += images.mean(2).sum(0)
std += images.std(2).sum(0)
mean /= len(loader.dataset)
std /= len(loader.dataset)
return mean, std
<|reserved_special_token_0|>
print(f'Mean: {mean}')
print(f'Std: {std}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
batch_size = 256
data_dir = 'nut_snacks/dataset/'
data_transforms = transforms.Compose([transforms.RandomResizedCrop(128),
transforms.ToTensor()])
dataset = ImageFolder(data_dir, transform=data_transforms)
print('Total dataset images: ', len(dataset))
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
def mean_std(loader):
mean = 0
std = 0
for images, _ in loader:
batch_samples = images.size(0)
images = images.view(batch_samples, images.size(1), -1)
mean += images.mean(2).sum(0)
std += images.std(2).sum(0)
mean /= len(loader.dataset)
std /= len(loader.dataset)
return mean, std
mean, std = mean_std(loader)
print(f'Mean: {mean}')
print(f'Std: {std}')
<|reserved_special_token_1|>
import torch
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import random_split
from torchvision.datasets import ImageFolder
batch_size = 256
data_dir = 'nut_snacks/dataset/'
data_transforms = transforms.Compose([transforms.RandomResizedCrop(128),
transforms.ToTensor()])
dataset = ImageFolder(data_dir, transform=data_transforms)
print('Total dataset images: ', len(dataset))
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
def mean_std(loader):
mean = 0
std = 0
for images, _ in loader:
batch_samples = images.size(0)
images = images.view(batch_samples, images.size(1), -1)
mean += images.mean(2).sum(0)
std += images.std(2).sum(0)
mean /= len(loader.dataset)
std /= len(loader.dataset)
return mean, std
mean, std = mean_std(loader)
print(f'Mean: {mean}')
print(f'Std: {std}')
<|reserved_special_token_1|>
import torch
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
# load the data Set
from torch.utils.data import random_split
from torchvision.datasets import ImageFolder
batch_size = 256
data_dir = 'nut_snacks/dataset/'
data_transforms = transforms.Compose(
[transforms.RandomResizedCrop(128),
transforms.ToTensor(),
])
dataset = ImageFolder(data_dir, transform=data_transforms)
print('Total dataset images: ',len(dataset))
loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size)
def mean_std(loader):
mean = 0
std = 0
for images, _ in loader :
batch_samples = images.size(0)
images = images.view(batch_samples, images.size(1), -1)
mean += images.mean(2).sum(0)
std += images.std(2).sum(0)
mean /= len(loader.dataset)
std /= len(loader.dataset)
return mean,std
mean, std = mean_std(loader)
print(f'Mean: {mean}')
print(f'Std: {std}')
|
flexible
|
{
"blob_id": "4156b003210a41d6ec8f30e2d20adfb1f4b3deb0",
"index": 6024,
"step-1": "<mask token>\n\n\ndef mean_std(loader):\n mean = 0\n std = 0\n for images, _ in loader:\n batch_samples = images.size(0)\n images = images.view(batch_samples, images.size(1), -1)\n mean += images.mean(2).sum(0)\n std += images.std(2).sum(0)\n mean /= len(loader.dataset)\n std /= len(loader.dataset)\n return mean, std\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Total dataset images: ', len(dataset))\n<mask token>\n\n\ndef mean_std(loader):\n mean = 0\n std = 0\n for images, _ in loader:\n batch_samples = images.size(0)\n images = images.view(batch_samples, images.size(1), -1)\n mean += images.mean(2).sum(0)\n std += images.std(2).sum(0)\n mean /= len(loader.dataset)\n std /= len(loader.dataset)\n return mean, std\n\n\n<mask token>\nprint(f'Mean: {mean}')\nprint(f'Std: {std}')\n",
"step-3": "<mask token>\nbatch_size = 256\ndata_dir = 'nut_snacks/dataset/'\ndata_transforms = transforms.Compose([transforms.RandomResizedCrop(128),\n transforms.ToTensor()])\ndataset = ImageFolder(data_dir, transform=data_transforms)\nprint('Total dataset images: ', len(dataset))\nloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)\n\n\ndef mean_std(loader):\n mean = 0\n std = 0\n for images, _ in loader:\n batch_samples = images.size(0)\n images = images.view(batch_samples, images.size(1), -1)\n mean += images.mean(2).sum(0)\n std += images.std(2).sum(0)\n mean /= len(loader.dataset)\n std /= len(loader.dataset)\n return mean, std\n\n\nmean, std = mean_std(loader)\nprint(f'Mean: {mean}')\nprint(f'Std: {std}')\n",
"step-4": "import torch\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data import random_split\nfrom torchvision.datasets import ImageFolder\nbatch_size = 256\ndata_dir = 'nut_snacks/dataset/'\ndata_transforms = transforms.Compose([transforms.RandomResizedCrop(128),\n transforms.ToTensor()])\ndataset = ImageFolder(data_dir, transform=data_transforms)\nprint('Total dataset images: ', len(dataset))\nloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)\n\n\ndef mean_std(loader):\n mean = 0\n std = 0\n for images, _ in loader:\n batch_samples = images.size(0)\n images = images.view(batch_samples, images.size(1), -1)\n mean += images.mean(2).sum(0)\n std += images.std(2).sum(0)\n mean /= len(loader.dataset)\n std /= len(loader.dataset)\n return mean, std\n\n\nmean, std = mean_std(loader)\nprint(f'Mean: {mean}')\nprint(f'Std: {std}')\n",
"step-5": "import torch\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Dataset, DataLoader\n # load the data Set\n\nfrom torch.utils.data import random_split\nfrom torchvision.datasets import ImageFolder\n\n\nbatch_size = 256\ndata_dir = 'nut_snacks/dataset/'\n\ndata_transforms = transforms.Compose(\n [transforms.RandomResizedCrop(128),\n \n transforms.ToTensor(),\n ])\n\ndataset = ImageFolder(data_dir, transform=data_transforms)\nprint('Total dataset images: ',len(dataset))\n\n\nloader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size)\n\n\n\n\ndef mean_std(loader):\n\tmean = 0\n\tstd = 0\n\tfor images, _ in loader :\n\t\tbatch_samples = images.size(0)\n\t\timages = images.view(batch_samples, images.size(1), -1)\n\t\tmean += images.mean(2).sum(0)\n\t\tstd += images.std(2).sum(0)\n\tmean /= len(loader.dataset)\n\tstd /= len(loader.dataset) \n\treturn mean,std\n\nmean, std = mean_std(loader)\n\nprint(f'Mean: {mean}')\n\nprint(f'Std: {std}')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import unittest
from domain.Activity import Activity
from domain.NABException import NABException
from domain.Person import Person
from domain.ActivityValidator import ActivityValidator
from repository.PersonRepository import PersonRepository
from repository.PersonFileRepository import PersonFileRepository
from repository.ActivityRepository import ActivityRepository
from repository.ActivityFileRepository import ActivityFileRepository
from controller.StatsController import StatsController
class StatsControllerTestCase(unittest.TestCase):
def setUp(self):
pR = PersonRepository()
aR = ActivityRepository()
self.L = StatsController(pR, aR)
self.p = Person(1, "John", "1", "A")
self.q = Person(2, "Mary", "1", "B")
self.a1 = Activity(self.p, "2015.12.20", "12:12", "Swimming")
self.a2 = Activity(self.p, "2016.01.20", "12:12", "Mapping")
self.a3 = Activity(self.q, "2015.12.21", "12:12", "Swimming")
self.a4 = Activity(self.q, "2015.12.20", "10:12", "Reading")
pR.add(self.p)
pR.add(self.q)
aR.add(self.a1)
aR.add(self.a2)
aR.add(self.a3)
aR.add(self.a4)
def test_activities_for_person_alphabetically(self):
L = self.L
a1 = self.a1
a2 = self.a2
a3 = self.a3
a4 = self.a4
assert L.activities_for_person_alphabetically(1) == [a2, a1]
assert L.activities_for_person_alphabetically(2) == [a4, a3]
assert L.activities_for_person_alphabetically(4) == []
def test_activities_for_person_by_date(self):
L = self.L
a1 = self.a1
a2 = self.a2
a3 = self.a3
a4 = self.a4
assert L.activities_for_person_by_date(1) == [a1, a2]
assert L.activities_for_person_by_date(2) == [a4, a3]
assert L.activities_for_person_by_date(4) == []
def test_people_with_activities_in_interval(self):
L = self.L
p = self.p
q = self.q
assert L.people_with_activities_in_interval("2015.12.20", "2016.01.01") == [p, q]
assert L.people_with_activities_in_interval("2000.01.01", "2010.01.01") == []
assert L.people_with_activities_in_interval("2016.01.01", "2017.01.01") == [p]
assert L.people_with_activities_in_interval("2015.12.21", "2015.12.21") == [q]
def test_activities_in_interval_alphabetically(self):
L = self.L
a1 = self.a1
a2 = self.a2
a3 = self.a3
a4 = self.a4
assert L.activities_in_interval_alphabetically("2015.12.20", "2016.01.01") == [a4, a1, a3]
assert L.activities_in_interval_alphabetically("2000.01.01", "2010.01.01") == []
assert L.activities_in_interval_alphabetically("2016.01.01", "2017.01.01") == [a2]
assert L.activities_in_interval_alphabetically("2015.12.21", "2015.12.21") == [a3]
def test_activities_in_interval_by_date(self):
L = self.L
a1 = self.a1
a2 = self.a2
a3 = self.a3
a4 = self.a4
assert L.activities_in_interval_by_date("2015.12.20", "2016.01.01") == [a4, a1, a3]
assert L.activities_in_interval_by_date("2000.01.01", "2010.01.01") == []
assert L.activities_in_interval_by_date("2016.01.01", "2017.01.01") == [a2]
assert L.activities_in_interval_by_date("2015.12.21", "2015.12.21") == [a3]
|
normal
|
{
"blob_id": "130581ddb0394dcceabc316468385d4e21959b63",
"index": 8682,
"step-1": "<mask token>\n\n\nclass StatsControllerTestCase(unittest.TestCase):\n\n def setUp(self):\n pR = PersonRepository()\n aR = ActivityRepository()\n self.L = StatsController(pR, aR)\n self.p = Person(1, 'John', '1', 'A')\n self.q = Person(2, 'Mary', '1', 'B')\n self.a1 = Activity(self.p, '2015.12.20', '12:12', 'Swimming')\n self.a2 = Activity(self.p, '2016.01.20', '12:12', 'Mapping')\n self.a3 = Activity(self.q, '2015.12.21', '12:12', 'Swimming')\n self.a4 = Activity(self.q, '2015.12.20', '10:12', 'Reading')\n pR.add(self.p)\n pR.add(self.q)\n aR.add(self.a1)\n aR.add(self.a2)\n aR.add(self.a3)\n aR.add(self.a4)\n <mask token>\n <mask token>\n\n def test_people_with_activities_in_interval(self):\n L = self.L\n p = self.p\n q = self.q\n assert L.people_with_activities_in_interval('2015.12.20', '2016.01.01'\n ) == [p, q]\n assert L.people_with_activities_in_interval('2000.01.01', '2010.01.01'\n ) == []\n assert L.people_with_activities_in_interval('2016.01.01', '2017.01.01'\n ) == [p]\n assert L.people_with_activities_in_interval('2015.12.21', '2015.12.21'\n ) == [q]\n <mask token>\n\n def test_activities_in_interval_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_in_interval_by_date('2015.12.20', '2016.01.01'\n ) == [a4, a1, a3]\n assert L.activities_in_interval_by_date('2000.01.01', '2010.01.01'\n ) == []\n assert L.activities_in_interval_by_date('2016.01.01', '2017.01.01'\n ) == [a2]\n assert L.activities_in_interval_by_date('2015.12.21', '2015.12.21'\n ) == [a3]\n",
"step-2": "<mask token>\n\n\nclass StatsControllerTestCase(unittest.TestCase):\n\n def setUp(self):\n pR = PersonRepository()\n aR = ActivityRepository()\n self.L = StatsController(pR, aR)\n self.p = Person(1, 'John', '1', 'A')\n self.q = Person(2, 'Mary', '1', 'B')\n self.a1 = Activity(self.p, '2015.12.20', '12:12', 'Swimming')\n self.a2 = Activity(self.p, '2016.01.20', '12:12', 'Mapping')\n self.a3 = Activity(self.q, '2015.12.21', '12:12', 'Swimming')\n self.a4 = Activity(self.q, '2015.12.20', '10:12', 'Reading')\n pR.add(self.p)\n pR.add(self.q)\n aR.add(self.a1)\n aR.add(self.a2)\n aR.add(self.a3)\n aR.add(self.a4)\n <mask token>\n\n def test_activities_for_person_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_for_person_by_date(1) == [a1, a2]\n assert L.activities_for_person_by_date(2) == [a4, a3]\n assert L.activities_for_person_by_date(4) == []\n\n def test_people_with_activities_in_interval(self):\n L = self.L\n p = self.p\n q = self.q\n assert L.people_with_activities_in_interval('2015.12.20', '2016.01.01'\n ) == [p, q]\n assert L.people_with_activities_in_interval('2000.01.01', '2010.01.01'\n ) == []\n assert L.people_with_activities_in_interval('2016.01.01', '2017.01.01'\n ) == [p]\n assert L.people_with_activities_in_interval('2015.12.21', '2015.12.21'\n ) == [q]\n <mask token>\n\n def test_activities_in_interval_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_in_interval_by_date('2015.12.20', '2016.01.01'\n ) == [a4, a1, a3]\n assert L.activities_in_interval_by_date('2000.01.01', '2010.01.01'\n ) == []\n assert L.activities_in_interval_by_date('2016.01.01', '2017.01.01'\n ) == [a2]\n assert L.activities_in_interval_by_date('2015.12.21', '2015.12.21'\n ) == [a3]\n",
"step-3": "<mask token>\n\n\nclass StatsControllerTestCase(unittest.TestCase):\n\n def setUp(self):\n pR = PersonRepository()\n aR = ActivityRepository()\n self.L = StatsController(pR, aR)\n self.p = Person(1, 'John', '1', 'A')\n self.q = Person(2, 'Mary', '1', 'B')\n self.a1 = Activity(self.p, '2015.12.20', '12:12', 'Swimming')\n self.a2 = Activity(self.p, '2016.01.20', '12:12', 'Mapping')\n self.a3 = Activity(self.q, '2015.12.21', '12:12', 'Swimming')\n self.a4 = Activity(self.q, '2015.12.20', '10:12', 'Reading')\n pR.add(self.p)\n pR.add(self.q)\n aR.add(self.a1)\n aR.add(self.a2)\n aR.add(self.a3)\n aR.add(self.a4)\n\n def test_activities_for_person_alphabetically(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_for_person_alphabetically(1) == [a2, a1]\n assert L.activities_for_person_alphabetically(2) == [a4, a3]\n assert L.activities_for_person_alphabetically(4) == []\n\n def test_activities_for_person_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_for_person_by_date(1) == [a1, a2]\n assert L.activities_for_person_by_date(2) == [a4, a3]\n assert L.activities_for_person_by_date(4) == []\n\n def test_people_with_activities_in_interval(self):\n L = self.L\n p = self.p\n q = self.q\n assert L.people_with_activities_in_interval('2015.12.20', '2016.01.01'\n ) == [p, q]\n assert L.people_with_activities_in_interval('2000.01.01', '2010.01.01'\n ) == []\n assert L.people_with_activities_in_interval('2016.01.01', '2017.01.01'\n ) == [p]\n assert L.people_with_activities_in_interval('2015.12.21', '2015.12.21'\n ) == [q]\n <mask token>\n\n def test_activities_in_interval_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_in_interval_by_date('2015.12.20', '2016.01.01'\n ) == [a4, a1, a3]\n assert L.activities_in_interval_by_date('2000.01.01', '2010.01.01'\n ) == []\n assert L.activities_in_interval_by_date('2016.01.01', '2017.01.01'\n ) == [a2]\n assert L.activities_in_interval_by_date('2015.12.21', '2015.12.21'\n ) == [a3]\n",
"step-4": "<mask token>\n\n\nclass StatsControllerTestCase(unittest.TestCase):\n\n def setUp(self):\n pR = PersonRepository()\n aR = ActivityRepository()\n self.L = StatsController(pR, aR)\n self.p = Person(1, 'John', '1', 'A')\n self.q = Person(2, 'Mary', '1', 'B')\n self.a1 = Activity(self.p, '2015.12.20', '12:12', 'Swimming')\n self.a2 = Activity(self.p, '2016.01.20', '12:12', 'Mapping')\n self.a3 = Activity(self.q, '2015.12.21', '12:12', 'Swimming')\n self.a4 = Activity(self.q, '2015.12.20', '10:12', 'Reading')\n pR.add(self.p)\n pR.add(self.q)\n aR.add(self.a1)\n aR.add(self.a2)\n aR.add(self.a3)\n aR.add(self.a4)\n\n def test_activities_for_person_alphabetically(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_for_person_alphabetically(1) == [a2, a1]\n assert L.activities_for_person_alphabetically(2) == [a4, a3]\n assert L.activities_for_person_alphabetically(4) == []\n\n def test_activities_for_person_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_for_person_by_date(1) == [a1, a2]\n assert L.activities_for_person_by_date(2) == [a4, a3]\n assert L.activities_for_person_by_date(4) == []\n\n def test_people_with_activities_in_interval(self):\n L = self.L\n p = self.p\n q = self.q\n assert L.people_with_activities_in_interval('2015.12.20', '2016.01.01'\n ) == [p, q]\n assert L.people_with_activities_in_interval('2000.01.01', '2010.01.01'\n ) == []\n assert L.people_with_activities_in_interval('2016.01.01', '2017.01.01'\n ) == [p]\n assert L.people_with_activities_in_interval('2015.12.21', '2015.12.21'\n ) == [q]\n\n def test_activities_in_interval_alphabetically(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_in_interval_alphabetically('2015.12.20',\n '2016.01.01') == [a4, a1, a3]\n assert L.activities_in_interval_alphabetically('2000.01.01',\n '2010.01.01') == []\n assert L.activities_in_interval_alphabetically('2016.01.01',\n '2017.01.01') == [a2]\n assert L.activities_in_interval_alphabetically('2015.12.21',\n '2015.12.21') == [a3]\n\n def test_activities_in_interval_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n assert L.activities_in_interval_by_date('2015.12.20', '2016.01.01'\n ) == [a4, a1, a3]\n assert L.activities_in_interval_by_date('2000.01.01', '2010.01.01'\n ) == []\n assert L.activities_in_interval_by_date('2016.01.01', '2017.01.01'\n ) == [a2]\n assert L.activities_in_interval_by_date('2015.12.21', '2015.12.21'\n ) == [a3]\n",
"step-5": "import unittest\nfrom domain.Activity import Activity\nfrom domain.NABException import NABException\nfrom domain.Person import Person\nfrom domain.ActivityValidator import ActivityValidator\nfrom repository.PersonRepository import PersonRepository\nfrom repository.PersonFileRepository import PersonFileRepository\nfrom repository.ActivityRepository import ActivityRepository\nfrom repository.ActivityFileRepository import ActivityFileRepository\nfrom controller.StatsController import StatsController\n\n\nclass StatsControllerTestCase(unittest.TestCase):\n\n def setUp(self):\n pR = PersonRepository()\n aR = ActivityRepository()\n self.L = StatsController(pR, aR)\n self.p = Person(1, \"John\", \"1\", \"A\")\n self.q = Person(2, \"Mary\", \"1\", \"B\")\n self.a1 = Activity(self.p, \"2015.12.20\", \"12:12\", \"Swimming\")\n self.a2 = Activity(self.p, \"2016.01.20\", \"12:12\", \"Mapping\")\n self.a3 = Activity(self.q, \"2015.12.21\", \"12:12\", \"Swimming\")\n self.a4 = Activity(self.q, \"2015.12.20\", \"10:12\", \"Reading\")\n\n pR.add(self.p)\n pR.add(self.q)\n aR.add(self.a1)\n aR.add(self.a2)\n aR.add(self.a3)\n aR.add(self.a4)\n\n\n def test_activities_for_person_alphabetically(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n\n assert L.activities_for_person_alphabetically(1) == [a2, a1]\n assert L.activities_for_person_alphabetically(2) == [a4, a3]\n assert L.activities_for_person_alphabetically(4) == []\n\n\n def test_activities_for_person_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n\n assert L.activities_for_person_by_date(1) == [a1, a2]\n assert L.activities_for_person_by_date(2) == [a4, a3]\n assert L.activities_for_person_by_date(4) == []\n\n\n def test_people_with_activities_in_interval(self):\n L = self.L\n p = self.p\n q = self.q\n\n assert L.people_with_activities_in_interval(\"2015.12.20\", \"2016.01.01\") == [p, q]\n assert L.people_with_activities_in_interval(\"2000.01.01\", \"2010.01.01\") == []\n assert L.people_with_activities_in_interval(\"2016.01.01\", \"2017.01.01\") == [p]\n assert L.people_with_activities_in_interval(\"2015.12.21\", \"2015.12.21\") == [q]\n\n\n def test_activities_in_interval_alphabetically(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n\n assert L.activities_in_interval_alphabetically(\"2015.12.20\", \"2016.01.01\") == [a4, a1, a3]\n assert L.activities_in_interval_alphabetically(\"2000.01.01\", \"2010.01.01\") == []\n assert L.activities_in_interval_alphabetically(\"2016.01.01\", \"2017.01.01\") == [a2]\n assert L.activities_in_interval_alphabetically(\"2015.12.21\", \"2015.12.21\") == [a3]\n\n\n def test_activities_in_interval_by_date(self):\n L = self.L\n a1 = self.a1\n a2 = self.a2\n a3 = self.a3\n a4 = self.a4\n\n assert L.activities_in_interval_by_date(\"2015.12.20\", \"2016.01.01\") == [a4, a1, a3]\n assert L.activities_in_interval_by_date(\"2000.01.01\", \"2010.01.01\") == []\n assert L.activities_in_interval_by_date(\"2016.01.01\", \"2017.01.01\") == [a2]\n assert L.activities_in_interval_by_date(\"2015.12.21\", \"2015.12.21\") == [a3]",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
class WorkerOutcome:
"""Possible outcomes for a worker.
"""
NORMAL = 'normal'
EXCEPTION = 'exception'
NO_TEST = 'no-test'
TIMEOUT = 'timeout'
SKIPPED = 'skipped'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WorkerOutcome:
"""Possible outcomes for a worker.
"""
NORMAL = 'normal'
EXCEPTION = 'exception'
NO_TEST = 'no-test'
TIMEOUT = 'timeout'
SKIPPED = 'skipped'
def worker(module_name, operator_class, occurrence, test_runner):
"""Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the
tests, and report the results.
This is fundamentally the single-mutation-and-test-run process
implementation.
There are three high-level ways that a worker can finish. First, it could
fail exceptionally, meaning that some uncaught exception made its way from
some part of the operation to terminate the function. This function will
intercept all exceptions and return it in a non-exceptional structure.
Second, the mutation testing machinery may determine that there is no
OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this
case there is no way to report a test result (i.e. killed, survived, or
incompetent) so a special value is returned indicating that no mutation is
possible.
Finally, and hopefully normally, the worker will find that it can run a
test. It will do so and report back the result - killed, survived, or
incompetent - in a structured way.
Returns: a WorkItem
Raises: This will generally not raise any exceptions. Rather, exceptions
will be reported using the 'exception' result-type in the return value.
"""
try:
with preserve_modules():
module = importlib.import_module(module_name)
module_source_file = inspect.getsourcefile(module)
module_ast = get_ast(module)
module_source = astunparse.unparse(module_ast)
core = MutatingCore(occurrence)
operator = operator_class(core)
modified_ast = operator.visit(module_ast)
modified_source = astunparse.unparse(modified_ast)
if not core.activation_record:
return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)
module_diff = ['--- mutation diff ---']
for line in difflib.unified_diff(module_source.split('\n'),
modified_source.split('\n'), fromfile='a' +
module_source_file, tofile='b' + module_source_file,
lineterm=''):
module_diff.append(line)
with using_ast(module_name, module_ast):
rec = test_runner()
rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.
NORMAL})
rec.update(core.activation_record)
return rec
except Exception:
return WorkItem(data=traceback.format_exception(*sys.exc_info()),
test_outcome=TestOutcome.INCOMPETENT, worker_outcome=
WorkerOutcome.EXCEPTION)
def worker_process(work_item, timeout, config):
"""Run `cosmic-ray worker` in a subprocess and return the results,
passing `config` to it via stdin.
Returns: An updated WorkItem
"""
work_item = WorkItem(work_item)
command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**
work_item)
log.info('executing: %s', command)
proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=
subprocess.PIPE, universal_newlines=True)
config_string = serialize_config(config)
try:
outs, _ = proc.communicate(input=config_string, timeout=timeout)
result = json.loads(outs)
work_item.update({k: v for k, v in result.items() if v is not None})
except subprocess.TimeoutExpired as exc:
work_item.worker_outcome = WorkerOutcome.TIMEOUT
work_item.data = exc.timeout
proc.kill()
except json.JSONDecodeError as exc:
work_item.worker_outcome = WorkerOutcome.EXCEPTION
work_item.data = exc
work_item.command_line = command
return work_item
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import typing
except ImportError:
pass
<|reserved_special_token_0|>
class WorkerOutcome:
"""Possible outcomes for a worker.
"""
NORMAL = 'normal'
EXCEPTION = 'exception'
NO_TEST = 'no-test'
TIMEOUT = 'timeout'
SKIPPED = 'skipped'
def worker(module_name, operator_class, occurrence, test_runner):
"""Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the
tests, and report the results.
This is fundamentally the single-mutation-and-test-run process
implementation.
There are three high-level ways that a worker can finish. First, it could
fail exceptionally, meaning that some uncaught exception made its way from
some part of the operation to terminate the function. This function will
intercept all exceptions and return it in a non-exceptional structure.
Second, the mutation testing machinery may determine that there is no
OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this
case there is no way to report a test result (i.e. killed, survived, or
incompetent) so a special value is returned indicating that no mutation is
possible.
Finally, and hopefully normally, the worker will find that it can run a
test. It will do so and report back the result - killed, survived, or
incompetent - in a structured way.
Returns: a WorkItem
Raises: This will generally not raise any exceptions. Rather, exceptions
will be reported using the 'exception' result-type in the return value.
"""
try:
with preserve_modules():
module = importlib.import_module(module_name)
module_source_file = inspect.getsourcefile(module)
module_ast = get_ast(module)
module_source = astunparse.unparse(module_ast)
core = MutatingCore(occurrence)
operator = operator_class(core)
modified_ast = operator.visit(module_ast)
modified_source = astunparse.unparse(modified_ast)
if not core.activation_record:
return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)
module_diff = ['--- mutation diff ---']
for line in difflib.unified_diff(module_source.split('\n'),
modified_source.split('\n'), fromfile='a' +
module_source_file, tofile='b' + module_source_file,
lineterm=''):
module_diff.append(line)
with using_ast(module_name, module_ast):
rec = test_runner()
rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.
NORMAL})
rec.update(core.activation_record)
return rec
except Exception:
return WorkItem(data=traceback.format_exception(*sys.exc_info()),
test_outcome=TestOutcome.INCOMPETENT, worker_outcome=
WorkerOutcome.EXCEPTION)
def worker_process(work_item, timeout, config):
"""Run `cosmic-ray worker` in a subprocess and return the results,
passing `config` to it via stdin.
Returns: An updated WorkItem
"""
work_item = WorkItem(work_item)
command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**
work_item)
log.info('executing: %s', command)
proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=
subprocess.PIPE, universal_newlines=True)
config_string = serialize_config(config)
try:
outs, _ = proc.communicate(input=config_string, timeout=timeout)
result = json.loads(outs)
work_item.update({k: v for k, v in result.items() if v is not None})
except subprocess.TimeoutExpired as exc:
work_item.worker_outcome = WorkerOutcome.TIMEOUT
work_item.data = exc.timeout
proc.kill()
except json.JSONDecodeError as exc:
work_item.worker_outcome = WorkerOutcome.EXCEPTION
work_item.data = exc
work_item.command_line = command
return work_item
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import difflib
import importlib
import inspect
import json
import logging
import subprocess
import sys
import traceback
import astunparse
try:
import typing
except ImportError:
pass
from .config import serialize_config
from .importing import preserve_modules, using_ast
from .mutating import MutatingCore
from .parsing import get_ast
from .testing.test_runner import TestOutcome
from .work_item import WorkItem
log = logging.getLogger()
class WorkerOutcome:
"""Possible outcomes for a worker.
"""
NORMAL = 'normal'
EXCEPTION = 'exception'
NO_TEST = 'no-test'
TIMEOUT = 'timeout'
SKIPPED = 'skipped'
def worker(module_name, operator_class, occurrence, test_runner):
"""Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the
tests, and report the results.
This is fundamentally the single-mutation-and-test-run process
implementation.
There are three high-level ways that a worker can finish. First, it could
fail exceptionally, meaning that some uncaught exception made its way from
some part of the operation to terminate the function. This function will
intercept all exceptions and return it in a non-exceptional structure.
Second, the mutation testing machinery may determine that there is no
OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this
case there is no way to report a test result (i.e. killed, survived, or
incompetent) so a special value is returned indicating that no mutation is
possible.
Finally, and hopefully normally, the worker will find that it can run a
test. It will do so and report back the result - killed, survived, or
incompetent - in a structured way.
Returns: a WorkItem
Raises: This will generally not raise any exceptions. Rather, exceptions
will be reported using the 'exception' result-type in the return value.
"""
try:
with preserve_modules():
module = importlib.import_module(module_name)
module_source_file = inspect.getsourcefile(module)
module_ast = get_ast(module)
module_source = astunparse.unparse(module_ast)
core = MutatingCore(occurrence)
operator = operator_class(core)
modified_ast = operator.visit(module_ast)
modified_source = astunparse.unparse(modified_ast)
if not core.activation_record:
return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)
module_diff = ['--- mutation diff ---']
for line in difflib.unified_diff(module_source.split('\n'),
modified_source.split('\n'), fromfile='a' +
module_source_file, tofile='b' + module_source_file,
lineterm=''):
module_diff.append(line)
with using_ast(module_name, module_ast):
rec = test_runner()
rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.
NORMAL})
rec.update(core.activation_record)
return rec
except Exception:
return WorkItem(data=traceback.format_exception(*sys.exc_info()),
test_outcome=TestOutcome.INCOMPETENT, worker_outcome=
WorkerOutcome.EXCEPTION)
def worker_process(work_item, timeout, config):
"""Run `cosmic-ray worker` in a subprocess and return the results,
passing `config` to it via stdin.
Returns: An updated WorkItem
"""
work_item = WorkItem(work_item)
command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**
work_item)
log.info('executing: %s', command)
proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=
subprocess.PIPE, universal_newlines=True)
config_string = serialize_config(config)
try:
outs, _ = proc.communicate(input=config_string, timeout=timeout)
result = json.loads(outs)
work_item.update({k: v for k, v in result.items() if v is not None})
except subprocess.TimeoutExpired as exc:
work_item.worker_outcome = WorkerOutcome.TIMEOUT
work_item.data = exc.timeout
proc.kill()
except json.JSONDecodeError as exc:
work_item.worker_outcome = WorkerOutcome.EXCEPTION
work_item.data = exc
work_item.command_line = command
return work_item
<|reserved_special_token_1|>
"""This is the body of the low-level worker tool.
A worker is intended to run as a process that imports a module, mutates it in
one location with one operator, runs the tests, reports the results, and dies.
"""
import difflib
import importlib
import inspect
import json
import logging
import subprocess
import sys
import traceback
import astunparse
try:
import typing # the typing module does some fancy stuff at import time
# which we shall not do twice... by loading it here,
# preserve_modules does not delete it and therefore
# fancy stuff happens only once
except ImportError:
pass
from .config import serialize_config
from .importing import preserve_modules, using_ast
from .mutating import MutatingCore
from .parsing import get_ast
from .testing.test_runner import TestOutcome
from .work_item import WorkItem
log = logging.getLogger()
class WorkerOutcome:
"""Possible outcomes for a worker.
"""
NORMAL = 'normal'
EXCEPTION = 'exception'
NO_TEST = 'no-test'
TIMEOUT = 'timeout'
SKIPPED = 'skipped'
def worker(module_name,
operator_class,
occurrence,
test_runner):
"""Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the
tests, and report the results.
This is fundamentally the single-mutation-and-test-run process
implementation.
There are three high-level ways that a worker can finish. First, it could
fail exceptionally, meaning that some uncaught exception made its way from
some part of the operation to terminate the function. This function will
intercept all exceptions and return it in a non-exceptional structure.
Second, the mutation testing machinery may determine that there is no
OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this
case there is no way to report a test result (i.e. killed, survived, or
incompetent) so a special value is returned indicating that no mutation is
possible.
Finally, and hopefully normally, the worker will find that it can run a
test. It will do so and report back the result - killed, survived, or
incompetent - in a structured way.
Returns: a WorkItem
Raises: This will generally not raise any exceptions. Rather, exceptions
will be reported using the 'exception' result-type in the return value.
"""
try:
with preserve_modules():
module = importlib.import_module(module_name)
module_source_file = inspect.getsourcefile(module)
module_ast = get_ast(module)
module_source = astunparse.unparse(module_ast)
core = MutatingCore(occurrence)
operator = operator_class(core)
# note: after this step module_ast and modified_ast
# appear to be the same
modified_ast = operator.visit(module_ast)
modified_source = astunparse.unparse(modified_ast)
if not core.activation_record:
return WorkItem(
worker_outcome=WorkerOutcome.NO_TEST)
# generate a source diff to visualize how the mutation
# operator has changed the code
module_diff = ["--- mutation diff ---"]
for line in difflib.unified_diff(module_source.split('\n'),
modified_source.split('\n'),
fromfile="a" + module_source_file,
tofile="b" + module_source_file,
lineterm=""):
module_diff.append(line)
with using_ast(module_name, module_ast):
rec = test_runner()
rec.update({
'diff': module_diff,
'worker_outcome': WorkerOutcome.NORMAL
})
rec.update(core.activation_record)
return rec
except Exception: # noqa # pylint: disable=broad-except
return WorkItem(
data=traceback.format_exception(*sys.exc_info()),
test_outcome=TestOutcome.INCOMPETENT,
worker_outcome=WorkerOutcome.EXCEPTION)
def worker_process(work_item,
timeout,
config):
"""Run `cosmic-ray worker` in a subprocess and return the results,
passing `config` to it via stdin.
Returns: An updated WorkItem
"""
# The work_item param may come as just a dict (e.g. if it arrives over
# celery), so we reconstruct a WorkItem to make it easier to work with.
work_item = WorkItem(work_item)
command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(
**work_item)
log.info('executing: %s', command)
proc = subprocess.Popen(command.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
config_string = serialize_config(config)
try:
outs, _ = proc.communicate(input=config_string, timeout=timeout)
result = json.loads(outs)
work_item.update({
k: v
for k, v
in result.items()
if v is not None
})
except subprocess.TimeoutExpired as exc:
work_item.worker_outcome = WorkerOutcome.TIMEOUT
work_item.data = exc.timeout
proc.kill()
except json.JSONDecodeError as exc:
work_item.worker_outcome = WorkerOutcome.EXCEPTION
work_item.data = exc
work_item.command_line = command
return work_item
|
flexible
|
{
  "blob_id": "73a778c6e4216c23ac8d82eef96ce7b73b18f661",
  "index": 9100,
  "step-1": "<mask token>\n\n\nclass WorkerOutcome:\n    \"\"\"Possible outcomes for a worker.\n    \"\"\"\n    NORMAL = 'normal'\n    EXCEPTION = 'exception'\n    NO_TEST = 'no-test'\n    TIMEOUT = 'timeout'\n    SKIPPED = 'skipped'\n\n\n<mask token>\n",
  "step-2": "<mask token>\n\n\nclass WorkerOutcome:\n    \"\"\"Possible outcomes for a worker.\n    \"\"\"\n    NORMAL = 'normal'\n    EXCEPTION = 'exception'\n    NO_TEST = 'no-test'\n    TIMEOUT = 'timeout'\n    SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n    \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n    tests, and report the results.\n\n    This is fundamentally the single-mutation-and-test-run process\n    implementation.\n\n    There are three high-level ways that a worker can finish. First, it could\n    fail exceptionally, meaning that some uncaught exception made its way from\n    some part of the operation to terminate the function. This function will\n    intercept all exceptions and return it in a non-exceptional structure.\n\n    Second, the mutation testing machinery may determine that there is no\n    OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n    case there is no way to report a test result (i.e. killed, survived, or\n    incompetent) so a special value is returned indicating that no mutation is\n    possible.\n\n    Finally, and hopefully normally, the worker will find that it can run a\n    test. It will do so and report back the result - killed, survived, or\n    incompetent - in a structured way.\n\n    Returns: a WorkItem\n\n    Raises: This will generally not raise any exceptions. Rather, exceptions\n    will be reported using the 'exception' result-type in the return value.\n\n    \"\"\"\n    try:\n        with preserve_modules():\n            module = importlib.import_module(module_name)\n            module_source_file = inspect.getsourcefile(module)\n            module_ast = get_ast(module)\n            module_source = astunparse.unparse(module_ast)\n            core = MutatingCore(occurrence)\n            operator = operator_class(core)\n            modified_ast = operator.visit(module_ast)\n            modified_source = astunparse.unparse(modified_ast)\n            if not core.activation_record:\n                return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n            module_diff = ['--- mutation diff ---']\n            for line in difflib.unified_diff(module_source.split('\\n'),\n                modified_source.split('\\n'), fromfile='a' +\n                module_source_file, tofile='b' + module_source_file,\n                lineterm=''):\n                module_diff.append(line)\n            with using_ast(module_name, module_ast):\n                rec = test_runner()\n            rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n                NORMAL})\n            rec.update(core.activation_record)\n            return rec\n    except Exception:\n        return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n            test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n            WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n    \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n    passing `config` to it via stdin.\n\n    Returns: An updated WorkItem\n\n    \"\"\"\n    work_item = WorkItem(work_item)\n    command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n        work_item)\n    log.info('executing: %s', command)\n    proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n        subprocess.PIPE, universal_newlines=True)\n    config_string = serialize_config(config)\n    try:\n        outs, _ = proc.communicate(input=config_string, timeout=timeout)\n        result = json.loads(outs)\n        work_item.update({k: v for k, v in result.items() if v is not None})\n    except subprocess.TimeoutExpired as exc:\n        work_item.worker_outcome = WorkerOutcome.TIMEOUT\n        work_item.data = exc.timeout\n        proc.kill()\n    except json.JSONDecodeError as exc:\n        work_item.worker_outcome = WorkerOutcome.EXCEPTION\n        work_item.data = exc\n    work_item.command_line = command\n    return work_item\n",
  "step-3": "<mask token>\ntry:\n    import typing\nexcept ImportError:\n    pass\n<mask token>\n\n\nclass WorkerOutcome:\n    \"\"\"Possible outcomes for a worker.\n    \"\"\"\n    NORMAL = 'normal'\n    EXCEPTION = 'exception'\n    NO_TEST = 'no-test'\n    TIMEOUT = 'timeout'\n    SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n    \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n    tests, and report the results.\n\n    This is fundamentally the single-mutation-and-test-run process\n    implementation.\n\n    There are three high-level ways that a worker can finish. First, it could\n    fail exceptionally, meaning that some uncaught exception made its way from\n    some part of the operation to terminate the function. This function will\n    intercept all exceptions and return it in a non-exceptional structure.\n\n    Second, the mutation testing machinery may determine that there is no\n    OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n    case there is no way to report a test result (i.e. killed, survived, or\n    incompetent) so a special value is returned indicating that no mutation is\n    possible.\n\n    Finally, and hopefully normally, the worker will find that it can run a\n    test. It will do so and report back the result - killed, survived, or\n    incompetent - in a structured way.\n\n    Returns: a WorkItem\n\n    Raises: This will generally not raise any exceptions. Rather, exceptions\n    will be reported using the 'exception' result-type in the return value.\n\n    \"\"\"\n    try:\n        with preserve_modules():\n            module = importlib.import_module(module_name)\n            module_source_file = inspect.getsourcefile(module)\n            module_ast = get_ast(module)\n            module_source = astunparse.unparse(module_ast)\n            core = MutatingCore(occurrence)\n            operator = operator_class(core)\n            modified_ast = operator.visit(module_ast)\n            modified_source = astunparse.unparse(modified_ast)\n            if not core.activation_record:\n                return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n            module_diff = ['--- mutation diff ---']\n            for line in difflib.unified_diff(module_source.split('\\n'),\n                modified_source.split('\\n'), fromfile='a' +\n                module_source_file, tofile='b' + module_source_file,\n                lineterm=''):\n                module_diff.append(line)\n            with using_ast(module_name, module_ast):\n                rec = test_runner()\n            rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n                NORMAL})\n            rec.update(core.activation_record)\n            return rec\n    except Exception:\n        return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n            test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n            WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n    \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n    passing `config` to it via stdin.\n\n    Returns: An updated WorkItem\n\n    \"\"\"\n    work_item = WorkItem(work_item)\n    command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n        work_item)\n    log.info('executing: %s', command)\n    proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n        subprocess.PIPE, universal_newlines=True)\n    config_string = serialize_config(config)\n    try:\n        outs, _ = proc.communicate(input=config_string, timeout=timeout)\n        result = json.loads(outs)\n        work_item.update({k: v for k, v in result.items() if v is not None})\n    except subprocess.TimeoutExpired as exc:\n        work_item.worker_outcome = WorkerOutcome.TIMEOUT\n        work_item.data = exc.timeout\n        proc.kill()\n    except json.JSONDecodeError as exc:\n        work_item.worker_outcome = WorkerOutcome.EXCEPTION\n        work_item.data = exc\n    work_item.command_line = command\n    return work_item\n",
  "step-4": "<mask token>\nimport difflib\nimport importlib\nimport inspect\nimport json\nimport logging\nimport subprocess\nimport sys\nimport traceback\nimport astunparse\ntry:\n    import typing\nexcept ImportError:\n    pass\nfrom .config import serialize_config\nfrom .importing import preserve_modules, using_ast\nfrom .mutating import MutatingCore\nfrom .parsing import get_ast\nfrom .testing.test_runner import TestOutcome\nfrom .work_item import WorkItem\nlog = logging.getLogger()\n\n\nclass WorkerOutcome:\n    \"\"\"Possible outcomes for a worker.\n    \"\"\"\n    NORMAL = 'normal'\n    EXCEPTION = 'exception'\n    NO_TEST = 'no-test'\n    TIMEOUT = 'timeout'\n    SKIPPED = 'skipped'\n\n\ndef worker(module_name, operator_class, occurrence, test_runner):\n    \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n    tests, and report the results.\n\n    This is fundamentally the single-mutation-and-test-run process\n    implementation.\n\n    There are three high-level ways that a worker can finish. First, it could\n    fail exceptionally, meaning that some uncaught exception made its way from\n    some part of the operation to terminate the function. This function will\n    intercept all exceptions and return it in a non-exceptional structure.\n\n    Second, the mutation testing machinery may determine that there is no\n    OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n    case there is no way to report a test result (i.e. killed, survived, or\n    incompetent) so a special value is returned indicating that no mutation is\n    possible.\n\n    Finally, and hopefully normally, the worker will find that it can run a\n    test. It will do so and report back the result - killed, survived, or\n    incompetent - in a structured way.\n\n    Returns: a WorkItem\n\n    Raises: This will generally not raise any exceptions. Rather, exceptions\n    will be reported using the 'exception' result-type in the return value.\n\n    \"\"\"\n    try:\n        with preserve_modules():\n            module = importlib.import_module(module_name)\n            module_source_file = inspect.getsourcefile(module)\n            module_ast = get_ast(module)\n            module_source = astunparse.unparse(module_ast)\n            core = MutatingCore(occurrence)\n            operator = operator_class(core)\n            modified_ast = operator.visit(module_ast)\n            modified_source = astunparse.unparse(modified_ast)\n            if not core.activation_record:\n                return WorkItem(worker_outcome=WorkerOutcome.NO_TEST)\n            module_diff = ['--- mutation diff ---']\n            for line in difflib.unified_diff(module_source.split('\\n'),\n                modified_source.split('\\n'), fromfile='a' +\n                module_source_file, tofile='b' + module_source_file,\n                lineterm=''):\n                module_diff.append(line)\n            with using_ast(module_name, module_ast):\n                rec = test_runner()\n            rec.update({'diff': module_diff, 'worker_outcome': WorkerOutcome.\n                NORMAL})\n            rec.update(core.activation_record)\n            return rec\n    except Exception:\n        return WorkItem(data=traceback.format_exception(*sys.exc_info()),\n            test_outcome=TestOutcome.INCOMPETENT, worker_outcome=\n            WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item, timeout, config):\n    \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n    passing `config` to it via stdin.\n\n    Returns: An updated WorkItem\n\n    \"\"\"\n    work_item = WorkItem(work_item)\n    command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(**\n        work_item)\n    log.info('executing: %s', command)\n    proc = subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=\n        subprocess.PIPE, universal_newlines=True)\n    config_string = serialize_config(config)\n    try:\n        outs, _ = proc.communicate(input=config_string, timeout=timeout)\n        result = json.loads(outs)\n        work_item.update({k: v for k, v in result.items() if v is not None})\n    except subprocess.TimeoutExpired as exc:\n        work_item.worker_outcome = WorkerOutcome.TIMEOUT\n        work_item.data = exc.timeout\n        proc.kill()\n    except json.JSONDecodeError as exc:\n        work_item.worker_outcome = WorkerOutcome.EXCEPTION\n        work_item.data = exc\n    work_item.command_line = command\n    return work_item\n",
  "step-5": "\"\"\"This is the body of the low-level worker tool.\n\nA worker is intended to run as a process that imports a module, mutates it in\none location with one operator, runs the tests, reports the results, and dies.\n\"\"\"\n\nimport difflib\nimport importlib\nimport inspect\nimport json\nimport logging\nimport subprocess\nimport sys\nimport traceback\n\nimport astunparse\ntry:\n    import typing  # the typing module does some fancy stuff at import time\n                   # which we shall not do twice... by loading it here,\n                   # preserve_modules does not delete it and therefore\n                   # fancy stuff happens only once\nexcept ImportError:\n    pass\n\nfrom .config import serialize_config\nfrom .importing import preserve_modules, using_ast\nfrom .mutating import MutatingCore\nfrom .parsing import get_ast\nfrom .testing.test_runner import TestOutcome\nfrom .work_item import WorkItem\n\nlog = logging.getLogger()\n\n\nclass WorkerOutcome:\n    \"\"\"Possible outcomes for a worker.\n    \"\"\"\n    NORMAL = 'normal'\n    EXCEPTION = 'exception'\n    NO_TEST = 'no-test'\n    TIMEOUT = 'timeout'\n    SKIPPED = 'skipped'\n\n\ndef worker(module_name,\n           operator_class,\n           occurrence,\n           test_runner):\n    \"\"\"Mutate the OCCURRENCE-th site for OPERATOR_CLASS in MODULE_NAME, run the\n    tests, and report the results.\n\n    This is fundamentally the single-mutation-and-test-run process\n    implementation.\n\n    There are three high-level ways that a worker can finish. First, it could\n    fail exceptionally, meaning that some uncaught exception made its way from\n    some part of the operation to terminate the function. This function will\n    intercept all exceptions and return it in a non-exceptional structure.\n\n    Second, the mutation testing machinery may determine that there is no\n    OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n    case there is no way to report a test result (i.e. killed, survived, or\n    incompetent) so a special value is returned indicating that no mutation is\n    possible.\n\n    Finally, and hopefully normally, the worker will find that it can run a\n    test. It will do so and report back the result - killed, survived, or\n    incompetent - in a structured way.\n\n    Returns: a WorkItem\n\n    Raises: This will generally not raise any exceptions. Rather, exceptions\n    will be reported using the 'exception' result-type in the return value.\n\n    \"\"\"\n    try:\n        with preserve_modules():\n            module = importlib.import_module(module_name)\n            module_source_file = inspect.getsourcefile(module)\n            module_ast = get_ast(module)\n            module_source = astunparse.unparse(module_ast)\n\n            core = MutatingCore(occurrence)\n            operator = operator_class(core)\n            # note: after this step module_ast and modified_ast\n            # appear to be the same\n            modified_ast = operator.visit(module_ast)\n            modified_source = astunparse.unparse(modified_ast)\n\n            if not core.activation_record:\n                return WorkItem(\n                    worker_outcome=WorkerOutcome.NO_TEST)\n\n            # generate a source diff to visualize how the mutation\n            # operator has changed the code\n            module_diff = [\"--- mutation diff ---\"]\n            for line in difflib.unified_diff(module_source.split('\\n'),\n                                             modified_source.split('\\n'),\n                                             fromfile=\"a\" + module_source_file,\n                                             tofile=\"b\" + module_source_file,\n                                             lineterm=\"\"):\n                module_diff.append(line)\n\n            with using_ast(module_name, module_ast):\n                rec = test_runner()\n\n            rec.update({\n                'diff': module_diff,\n                'worker_outcome': WorkerOutcome.NORMAL\n            })\n            rec.update(core.activation_record)\n            return rec\n\n    except Exception:  # noqa # pylint: disable=broad-except\n        return WorkItem(\n            data=traceback.format_exception(*sys.exc_info()),\n            test_outcome=TestOutcome.INCOMPETENT,\n            worker_outcome=WorkerOutcome.EXCEPTION)\n\n\ndef worker_process(work_item,\n                   timeout,\n                   config):\n    \"\"\"Run `cosmic-ray worker` in a subprocess and return the results,\n    passing `config` to it via stdin.\n\n    Returns: An updated WorkItem\n\n    \"\"\"\n    # The work_item param may come as just a dict (e.g. if it arrives over\n    # celery), so we reconstruct a WorkItem to make it easier to work with.\n    work_item = WorkItem(work_item)\n\n    command = 'cosmic-ray worker {module} {operator} {occurrence}'.format(\n        **work_item)\n\n    log.info('executing: %s', command)\n\n    proc = subprocess.Popen(command.split(),\n                            stdin=subprocess.PIPE,\n                            stdout=subprocess.PIPE,\n                            universal_newlines=True)\n    config_string = serialize_config(config)\n    try:\n        outs, _ = proc.communicate(input=config_string, timeout=timeout)\n        result = json.loads(outs)\n        work_item.update({\n            k: v\n            for k, v\n            in result.items()\n            if v is not None\n        })\n    except subprocess.TimeoutExpired as exc:\n        work_item.worker_outcome = WorkerOutcome.TIMEOUT\n        work_item.data = exc.timeout\n        proc.kill()\n    except json.JSONDecodeError as exc:\n        work_item.worker_outcome = WorkerOutcome.EXCEPTION\n        work_item.data = exc\n\n    work_item.command_line = command\n    return work_item\n",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def is_leap_year(date):
if date % 400 == 0:
return True
elif date % 100 == 0:
return False
elif date % 4 == 0:
return True
else:
return False
<|reserved_special_token_1|>
#returns true if given date is a leap year, false otherwise
def is_leap_year(date):
#if divisible by 400, definitely a leap year
if date % 400 == 0: return True
#if divisible by 100 (and not 400), not a leap year
elif date % 100 == 0: return False
#divisible by 4 and not by 100? leap year
elif date % 4 == 0: return True
#otherwise not a leap year
else : return False
|
flexible
|
{
"blob_id": "496d52a984bb8c0e72948ab0c8db5e6035427a68",
"index": 5209,
"step-1": "<mask token>\n",
"step-2": "def is_leap_year(date):\n if date % 400 == 0:\n return True\n elif date % 100 == 0:\n return False\n elif date % 4 == 0:\n return True\n else:\n return False\n",
"step-3": "#returns true if given date is a leap year, false otherwise\n\ndef is_leap_year(date):\n\t#if divisible by 400, definitely a leap year\n\tif date % 400 == 0: return True \n\t#if divisible by 100 (and not 400), not a leap year\n\telif date % 100 == 0: return False \n\t#divisible by 4 and not by 100? leap year\n\telif date % 4 == 0: return True\n\t#otherwise not a leap year \n\telse : return False\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
import sqlite3
import operator
from collections import OrderedDict
import matplotlib.pyplot as plt
def parse(url):
try:
parsed_url_components = url.split('//')
sublevel_split = parsed_url_components[1].split('/', 1)
domain = sublevel_split[0].replace("www.", "")
return domain
except IndexError:
print("URL format error!")
def analyze(results):
prompt = input("[.] Type <c> to print or <p> to plot\n[>] ")
if prompt == "c":
for site, count in list(sites_count_sorted.items()):
print(site, count)
elif prompt == "p":
plt.bar(list(range(len(results))), list(results.values()), align='edge')
plt.xticks(rotation=45)
plt.xticks(list(range(len(results))), list(results.keys()))
plt.show()
else:
print("[.] Uh?")
quit()
|
normal
|
{
"blob_id": "c74fc99bf8582fd83c312f27dfffbe894a2c8c1b",
"index": 3431,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\ndef analyze(results):\n prompt = input('[.] Type <c> to print or <p> to plot\\n[>] ')\n if prompt == 'c':\n for site, count in list(sites_count_sorted.items()):\n print(site, count)\n elif prompt == 'p':\n plt.bar(list(range(len(results))), list(results.values()), align='edge'\n )\n plt.xticks(rotation=45)\n plt.xticks(list(range(len(results))), list(results.keys()))\n plt.show()\n else:\n print('[.] Uh?')\n quit()\n",
"step-4": "import os\nimport sqlite3\nimport operator\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\ndef analyze(results):\n prompt = input('[.] Type <c> to print or <p> to plot\\n[>] ')\n if prompt == 'c':\n for site, count in list(sites_count_sorted.items()):\n print(site, count)\n elif prompt == 'p':\n plt.bar(list(range(len(results))), list(results.values()), align='edge'\n )\n plt.xticks(rotation=45)\n plt.xticks(list(range(len(results))), list(results.keys()))\n plt.show()\n else:\n print('[.] Uh?')\n quit()\n",
"step-5": "import os\nimport sqlite3\nimport operator\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\ndef parse(url):\n\ttry:\n\t\tparsed_url_components = url.split('//')\n\t\tsublevel_split = parsed_url_components[1].split('/', 1)\n\t\tdomain = sublevel_split[0].replace(\"www.\", \"\")\n\t\treturn domain\n\texcept IndexError:\n\t\tprint(\"URL format error!\")\n\ndef analyze(results):\n\n\tprompt = input(\"[.] Type <c> to print or <p> to plot\\n[>] \")\n\n\tif prompt == \"c\":\n\t\tfor site, count in list(sites_count_sorted.items()):\n\t\t\tprint(site, count)\n\telif prompt == \"p\":\n\t\tplt.bar(list(range(len(results))), list(results.values()), align='edge')\n\t\tplt.xticks(rotation=45)\n\t\tplt.xticks(list(range(len(results))), list(results.keys()))\n\t\tplt.show()\n\telse:\n\t\tprint(\"[.] Uh?\")\n\t\tquit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def ddm_dd_convert(coord, direction):
"""Converts GPS reading from DDM to DD
str coord - the ddm coordinate from $GPGGA
str direction - the direction of the coord (N,S,W,E)
returns - string representation of dd coordinate
"""
value = ''
if (direction == 'S' or direction == 'W'):
value += '-'
value += coord[0:-7]
minute = float(coord[-7:])
decimal = round(minute / 60, 8)
result = str(decimal)[1:]
value += result
return value
def gprmc_convert(line):
"""Translates $GPRMC line into documented array
str line - the GPRMC line
returns - the data documented into array
"""
gps = line.strip().split(',')
#check data
if gps[2] == 'V':
return
raw_date = gps[9]
time = ''
date = raw_date[0:2]
month = raw_date[2:4]
year = raw_date[4:]
#modify year if reaches year 2100
time += date + '/' + month + '/20' + year
return [time]
def gpvtg_convert(line):
"""Translates $GPVTG line into documented array
Data only used for measuring ground speed
str line - the GPVTG line
returns - the data documented into array
"""
gps = line.strip().split(',')
#check data
if gps[1] == '0.00':
return
#jsondata = {'Horizontal speed': gps[7] + ' kmph or ' + gps[5] + 'knots'}
return []
def gpgga_convert(line):
"""Translates $GPGGPA line into documented array
str line - the GPGGA line
returns - the data documented into array
"""
gps = line.strip().split(',')
#check data
if gps[6] == '0' :
return
fix = ''
if gps[6] == '1':
fix = 'GPS fix'
elif gps[6] == '2':
fix = 'DGPS fix'
elif gps[6] == '4':
fix = 'RTK Fix coordinate (centimeter precision)'
elif gps[6] == '5':
fix = 'RTK Float (decimeter precision)'
#utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]
lat = ddm_dd_convert(gps[2], gps[3])
long = ddm_dd_convert(gps[4], gps[5])
return [lat, long, fix]
def gpgsa_convert(line):
"""Translates $GPGSA line into documented array
str line - the GPGSA line
returns - the data documented into array
"""
gps = line.strip().split(',')
#check data
if gps[2] == '1':
return
if gps[2] == '2':
fix = '2D fix'
else:
fix = '3D fix'
return [fix]
|
normal
|
{
"blob_id": "dc5630e17bb6ed85157b06108250427be41416d1",
"index": 7766,
"step-1": "<mask token>\n\n\ndef gprmc_convert(line):\n \"\"\"Translates $GPRMC line into documented array\n str line - the GPRMC line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[2] == 'V':\n return\n raw_date = gps[9]\n time = ''\n date = raw_date[0:2]\n month = raw_date[2:4]\n year = raw_date[4:]\n time += date + '/' + month + '/20' + year\n return [time]\n\n\n<mask token>\n\n\ndef gpgga_convert(line):\n \"\"\"Translates $GPGGPA line into documented array\n str line - the GPGGA line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[6] == '0':\n return\n fix = ''\n if gps[6] == '1':\n fix = 'GPS fix'\n elif gps[6] == '2':\n fix = 'DGPS fix'\n elif gps[6] == '4':\n fix = 'RTK Fix coordinate (centimeter precision)'\n elif gps[6] == '5':\n fix = 'RTK Float (decimeter precision)'\n lat = ddm_dd_convert(gps[2], gps[3])\n long = ddm_dd_convert(gps[4], gps[5])\n return [lat, long, fix]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gprmc_convert(line):\n \"\"\"Translates $GPRMC line into documented array\n str line - the GPRMC line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[2] == 'V':\n return\n raw_date = gps[9]\n time = ''\n date = raw_date[0:2]\n month = raw_date[2:4]\n year = raw_date[4:]\n time += date + '/' + month + '/20' + year\n return [time]\n\n\ndef gpvtg_convert(line):\n \"\"\"Translates $GPVTG line into documented array\n Data only used for measuring ground speed\n str line - the GPVTG line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[1] == '0.00':\n return\n return []\n\n\ndef gpgga_convert(line):\n \"\"\"Translates $GPGGPA line into documented array\n str line - the GPGGA line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[6] == '0':\n return\n fix = ''\n if gps[6] == '1':\n fix = 'GPS fix'\n elif gps[6] == '2':\n fix = 'DGPS fix'\n elif gps[6] == '4':\n fix = 'RTK Fix coordinate (centimeter precision)'\n elif gps[6] == '5':\n fix = 'RTK Float (decimeter precision)'\n lat = ddm_dd_convert(gps[2], gps[3])\n long = ddm_dd_convert(gps[4], gps[5])\n return [lat, long, fix]\n\n\n<mask token>\n",
"step-3": "def ddm_dd_convert(coord, direction):\n \"\"\"Converts GPS reading from DDM to DD\n str coord - the ddm coordinate from $GPGGA\n str direction - the direction of the coord (N,S,W,E)\n returns - string representation of dd coordinate\n \"\"\"\n value = ''\n if direction == 'S' or direction == 'W':\n value += '-'\n value += coord[0:-7]\n minute = float(coord[-7:])\n decimal = round(minute / 60, 8)\n result = str(decimal)[1:]\n value += result\n return value\n\n\ndef gprmc_convert(line):\n \"\"\"Translates $GPRMC line into documented array\n str line - the GPRMC line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[2] == 'V':\n return\n raw_date = gps[9]\n time = ''\n date = raw_date[0:2]\n month = raw_date[2:4]\n year = raw_date[4:]\n time += date + '/' + month + '/20' + year\n return [time]\n\n\ndef gpvtg_convert(line):\n \"\"\"Translates $GPVTG line into documented array\n Data only used for measuring ground speed\n str line - the GPVTG line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[1] == '0.00':\n return\n return []\n\n\ndef gpgga_convert(line):\n \"\"\"Translates $GPGGPA line into documented array\n str line - the GPGGA line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[6] == '0':\n return\n fix = ''\n if gps[6] == '1':\n fix = 'GPS fix'\n elif gps[6] == '2':\n fix = 'DGPS fix'\n elif gps[6] == '4':\n fix = 'RTK Fix coordinate (centimeter precision)'\n elif gps[6] == '5':\n fix = 'RTK Float (decimeter precision)'\n lat = ddm_dd_convert(gps[2], gps[3])\n long = ddm_dd_convert(gps[4], gps[5])\n return [lat, long, fix]\n\n\n<mask token>\n",
"step-4": "def ddm_dd_convert(coord, direction):\n \"\"\"Converts GPS reading from DDM to DD\n str coord - the ddm coordinate from $GPGGA\n str direction - the direction of the coord (N,S,W,E)\n returns - string representation of dd coordinate\n \"\"\"\n value = ''\n if direction == 'S' or direction == 'W':\n value += '-'\n value += coord[0:-7]\n minute = float(coord[-7:])\n decimal = round(minute / 60, 8)\n result = str(decimal)[1:]\n value += result\n return value\n\n\ndef gprmc_convert(line):\n \"\"\"Translates $GPRMC line into documented array\n str line - the GPRMC line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[2] == 'V':\n return\n raw_date = gps[9]\n time = ''\n date = raw_date[0:2]\n month = raw_date[2:4]\n year = raw_date[4:]\n time += date + '/' + month + '/20' + year\n return [time]\n\n\ndef gpvtg_convert(line):\n \"\"\"Translates $GPVTG line into documented array\n Data only used for measuring ground speed\n str line - the GPVTG line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[1] == '0.00':\n return\n return []\n\n\ndef gpgga_convert(line):\n \"\"\"Translates $GPGGPA line into documented array\n str line - the GPGGA line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[6] == '0':\n return\n fix = ''\n if gps[6] == '1':\n fix = 'GPS fix'\n elif gps[6] == '2':\n fix = 'DGPS fix'\n elif gps[6] == '4':\n fix = 'RTK Fix coordinate (centimeter precision)'\n elif gps[6] == '5':\n fix = 'RTK Float (decimeter precision)'\n lat = ddm_dd_convert(gps[2], gps[3])\n long = ddm_dd_convert(gps[4], gps[5])\n return [lat, long, fix]\n\n\ndef gpgsa_convert(line):\n \"\"\"Translates $GPGSA line into documented array\n str line - the GPGSA line\n returns - the data documented into array\n \"\"\"\n gps = line.strip().split(',')\n if gps[2] == '1':\n return\n if gps[2] == '2':\n fix = '2D fix'\n else:\n fix = '3D fix'\n return [fix]\n",
"step-5": "\r\n\r\ndef ddm_dd_convert(coord, direction):\r\n \"\"\"Converts GPS reading from DDM to DD\r\n str coord - the ddm coordinate from $GPGGA\r\n str direction - the direction of the coord (N,S,W,E)\r\n returns - string representation of dd coordinate\r\n \"\"\"\r\n value = ''\r\n if (direction == 'S' or direction == 'W'):\r\n value += '-'\r\n value += coord[0:-7] \r\n minute = float(coord[-7:])\r\n decimal = round(minute / 60, 8)\r\n result = str(decimal)[1:]\r\n value += result\r\n return value\r\n\r\ndef gprmc_convert(line):\r\n \"\"\"Translates $GPRMC line into documented array\r\n str line - the GPRMC line\r\n returns - the data documented into array\r\n \"\"\"\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[2] == 'V':\r\n return\r\n raw_date = gps[9]\r\n time = ''\r\n date = raw_date[0:2]\r\n month = raw_date[2:4]\r\n year = raw_date[4:]\r\n #modify year if reaches year 2100\r\n time += date + '/' + month + '/20' + year\r\n return [time]\r\n\r\n\r\ndef gpvtg_convert(line):\r\n \"\"\"Translates $GPVTG line into documented array\r\n Data only used for measuring ground speed\r\n str line - the GPVTG line\r\n returns - the data documented into array\r\n \"\"\"\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[1] == '0.00': \r\n return\r\n #jsondata = {'Horizontal speed': gps[7] + ' kmph or ' + gps[5] + 'knots'}\r\n return []\r\n\r\n\r\ndef gpgga_convert(line):\r\n \"\"\"Translates $GPGGPA line into documented array\r\n str line - the GPGGA line\r\n returns - the data documented into array\r\n \"\"\"\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[6] == '0' :\r\n return\r\n fix = ''\r\n if gps[6] == '1':\r\n fix = 'GPS fix'\r\n elif gps[6] == '2':\r\n fix = 'DGPS fix'\r\n elif gps[6] == '4':\r\n fix = 'RTK Fix coordinate (centimeter precision)'\r\n elif gps[6] == '5':\r\n fix = 'RTK Float (decimeter precision)'\r\n #utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]\r\n lat = ddm_dd_convert(gps[2], gps[3])\r\n long = ddm_dd_convert(gps[4], gps[5]) \r\n return [lat, long, fix]\r\n\r\n \r\ndef gpgsa_convert(line):\r\n \"\"\"Translates $GPGSA line into documented array\r\n str line - the GPGSA line\r\n returns - the data documented into array\r\n \"\"\"\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[2] == '1':\r\n return\r\n if gps[2] == '2':\r\n fix = '2D fix'\r\n else:\r\n fix = '3D fix'\r\n return [fix]",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 23 20:33:08 2018
@author: ashima.garg
"""
import tensorflow as tf
class Layer():
def __init__(self, shape, mean, stddev):
self.weights = tf.Variable(tf.random_normal(shape=shape, mean=mean, stddev=stddev))
self.biases = tf.Variable(tf.zeros(shape=[shape[-1]]))
def feed_forward(self, input_data, stride=None):
raise NotImplementedError
class Convolution_Layer(Layer):
def __init__(self, shape, mean, stddev):
super(Convolution_Layer, self).__init__(shape, mean, stddev)
def feed_forward(self, input_data, stride):
conv = tf.nn.conv2d(input_data, self.weights, stride, padding="VALID")
output_data = tf.nn.relu(tf.nn.bias_add(conv, self.biases))
return output_data
class Output_Layer(Layer):
def __init__(self, shape, mean, stddev):
super(Output_Layer, self).__init__(shape, mean, stddev)
def feed_forward(self, input_data, stride):
output_data = tf.nn.bias_add(tf.nn.conv2d(input_data, self.weights, stride, padding="VALID"), self.biases)
return output_data
|
normal
|
{
"blob_id": "ed246f2887f19ccf922a4d386918f0f0771fb443",
"index": 5106,
"step-1": "<mask token>\n\n\nclass Convolution_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Convolution_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n conv = tf.nn.conv2d(input_data, self.weights, stride, padding='VALID')\n output_data = tf.nn.relu(tf.nn.bias_add(conv, self.biases))\n return output_data\n\n\nclass Output_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Output_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n output_data = tf.nn.bias_add(tf.nn.conv2d(input_data, self.weights,\n stride, padding='VALID'), self.biases)\n return output_data\n",
"step-2": "<mask token>\n\n\nclass Layer:\n <mask token>\n <mask token>\n\n\nclass Convolution_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Convolution_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n conv = tf.nn.conv2d(input_data, self.weights, stride, padding='VALID')\n output_data = tf.nn.relu(tf.nn.bias_add(conv, self.biases))\n return output_data\n\n\nclass Output_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Output_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n output_data = tf.nn.bias_add(tf.nn.conv2d(input_data, self.weights,\n stride, padding='VALID'), self.biases)\n return output_data\n",
"step-3": "<mask token>\n\n\nclass Layer:\n\n def __init__(self, shape, mean, stddev):\n self.weights = tf.Variable(tf.random_normal(shape=shape, mean=mean,\n stddev=stddev))\n self.biases = tf.Variable(tf.zeros(shape=[shape[-1]]))\n <mask token>\n\n\nclass Convolution_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Convolution_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n conv = tf.nn.conv2d(input_data, self.weights, stride, padding='VALID')\n output_data = tf.nn.relu(tf.nn.bias_add(conv, self.biases))\n return output_data\n\n\nclass Output_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Output_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n output_data = tf.nn.bias_add(tf.nn.conv2d(input_data, self.weights,\n stride, padding='VALID'), self.biases)\n return output_data\n",
"step-4": "<mask token>\n\n\nclass Layer:\n\n def __init__(self, shape, mean, stddev):\n self.weights = tf.Variable(tf.random_normal(shape=shape, mean=mean,\n stddev=stddev))\n self.biases = tf.Variable(tf.zeros(shape=[shape[-1]]))\n\n def feed_forward(self, input_data, stride=None):\n raise NotImplementedError\n\n\nclass Convolution_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Convolution_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n conv = tf.nn.conv2d(input_data, self.weights, stride, padding='VALID')\n output_data = tf.nn.relu(tf.nn.bias_add(conv, self.biases))\n return output_data\n\n\nclass Output_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Output_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n output_data = tf.nn.bias_add(tf.nn.conv2d(input_data, self.weights,\n stride, padding='VALID'), self.biases)\n return output_data\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 23 20:33:08 2018\n\n@author: ashima.garg\n\"\"\"\n\nimport tensorflow as tf\n\nclass Layer():\n\n def __init__(self, shape, mean, stddev):\n self.weights = tf.Variable(tf.random_normal(shape=shape, mean=mean, stddev=stddev))\n self.biases = tf.Variable(tf.zeros(shape=[shape[-1]]))\n\n def feed_forward(self, input_data, stride=None):\n raise NotImplementedError\n\n\nclass Convolution_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Convolution_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n conv = tf.nn.conv2d(input_data, self.weights, stride, padding=\"VALID\")\n output_data = tf.nn.relu(tf.nn.bias_add(conv, self.biases))\n return output_data\n\n\nclass Output_Layer(Layer):\n\n def __init__(self, shape, mean, stddev):\n super(Output_Layer, self).__init__(shape, mean, stddev)\n\n def feed_forward(self, input_data, stride):\n output_data = tf.nn.bias_add(tf.nn.conv2d(input_data, self.weights, stride, padding=\"VALID\"), self.biases)\n return output_data\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
class Tela:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def setEstagio(self, temp):
if temp in self.telas:
self.estagio = temp
else:
print('Tela não existe, erro de digitação no código')
<|reserved_special_token_0|>
def atualizarSprites(self):
if self.j.getVidas() == 2:
self.sprites.remove(self.v2)
if self.j.getVidas() == 1:
self.sprites.remove(self.v1)
if self.j.getVidas() == 0:
self.sprites.remove(self.v0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Tela:
def __init__(self, j, t0):
self.telas = ['jogo', 'game over']
self.estagio = 'jogo'
self.j = j
self.v0 = Sprite(40, 40, 30, 30, t0)
self.v1 = Sprite(40, 80, 30, 30, t0)
self.v2 = Sprite(40, 120, 30, 30, t0)
self.sprites = [self.v0, self.v1, self.v2]
def getEstagio(self):
return self.estagio
def setEstagio(self, temp):
if temp in self.telas:
self.estagio = temp
else:
print('Tela não existe, erro de digitação no código')
<|reserved_special_token_0|>
def atualizarSprites(self):
if self.j.getVidas() == 2:
self.sprites.remove(self.v2)
if self.j.getVidas() == 1:
self.sprites.remove(self.v1)
if self.j.getVidas() == 0:
self.sprites.remove(self.v0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Tela:
def __init__(self, j, t0):
self.telas = ['jogo', 'game over']
self.estagio = 'jogo'
self.j = j
self.v0 = Sprite(40, 40, 30, 30, t0)
self.v1 = Sprite(40, 80, 30, 30, t0)
self.v2 = Sprite(40, 120, 30, 30, t0)
self.sprites = [self.v0, self.v1, self.v2]
def getEstagio(self):
return self.estagio
def setEstagio(self, temp):
if temp in self.telas:
self.estagio = temp
else:
print('Tela não existe, erro de digitação no código')
def getSprites(self):
return self.sprites
def atualizarSprites(self):
if self.j.getVidas() == 2:
self.sprites.remove(self.v2)
if self.j.getVidas() == 1:
self.sprites.remove(self.v1)
if self.j.getVidas() == 0:
self.sprites.remove(self.v0)
<|reserved_special_token_1|>
from SpritesClass import Sprite
from JogadorClass import Jogador
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
class Tela:
def __init__(self, j, t0):
self.telas = ['jogo', 'game over']
self.estagio = 'jogo'
self.j = j
self.v0 = Sprite(40, 40, 30, 30, t0)
self.v1 = Sprite(40, 80, 30, 30, t0)
self.v2 = Sprite(40, 120, 30, 30, t0)
self.sprites = [self.v0, self.v1, self.v2]
def getEstagio(self):
return self.estagio
def setEstagio(self, temp):
if temp in self.telas:
self.estagio = temp
else:
print('Tela não existe, erro de digitação no código')
def getSprites(self):
return self.sprites
def atualizarSprites(self):
if self.j.getVidas() == 2:
self.sprites.remove(self.v2)
if self.j.getVidas() == 1:
self.sprites.remove(self.v1)
if self.j.getVidas() == 0:
self.sprites.remove(self.v0)
<|reserved_special_token_1|>
from SpritesClass import Sprite
from JogadorClass import Jogador
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
class Tela:
def __init__(self,j,t0):
self.telas = ["jogo","game over"] #telas existentes
self.estagio = "jogo"
self.j = j
#sprites
self.v0 = Sprite(40,40,30,30,t0)
self.v1 = Sprite(40,80,30,30,t0)
self.v2 = Sprite(40,120,30,30,t0)
self.sprites = [self.v0,self.v1,self.v2]
def getEstagio(self):
return self.estagio
def setEstagio(self,temp):
if temp in self.telas:
self.estagio=temp
else:
print("Tela não existe, erro de digitação no código")
def getSprites(self):
return self.sprites
def atualizarSprites(self):
if self.j.getVidas() == 2:
self.sprites.remove(self.v2)
if self.j.getVidas() == 1:
self.sprites.remove(self.v1)
if self.j.getVidas() == 0:
self.sprites.remove(self.v0)
|
flexible
|
{
"blob_id": "d1f0baa1ff87ece50aaded5e60908269e81b6734",
"index": 1952,
"step-1": "<mask token>\n\n\nclass Tela:\n <mask token>\n <mask token>\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n <mask token>\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-2": "<mask token>\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n <mask token>\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-3": "<mask token>\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-4": "from SpritesClass import Sprite\nfrom JogadorClass import Jogador\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-5": "from SpritesClass import Sprite\nfrom JogadorClass import Jogador\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\nclass Tela:\n def __init__(self,j,t0):\n self.telas = [\"jogo\",\"game over\"] #telas existentes\n self.estagio = \"jogo\"\n self.j = j\n\n #sprites\n self.v0 = Sprite(40,40,30,30,t0)\n self.v1 = Sprite(40,80,30,30,t0)\n self.v2 = Sprite(40,120,30,30,t0)\n self.sprites = [self.v0,self.v1,self.v2]\n\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self,temp):\n if temp in self.telas:\n self.estagio=temp\n else:\n print(\"Tela não existe, erro de digitação no código\")\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('shop', '0032_product_sex')]
operations = [migrations.AddField(model_name='product', name=
'price_ret_sale', field=models.IntegerField(default=0, verbose_name
='Розничная цена, с учетом скидки')), migrations.AddField(
model_name='product', name='size_5xl', field=models.IntegerField(
default=0, verbose_name='5XL размер')), migrations.AddField(
model_name='product', name='size_6xl', field=models.IntegerField(
default=0, verbose_name='6XL размер')), migrations.AlterField(
model_name='product', name='price_opt_2', field=models.IntegerField
(default=0, verbose_name='- 3% от 30000')), migrations.AlterField(
model_name='product', name='price_opt_3', field=models.IntegerField
(default=0, verbose_name='- 7% от 70000')), migrations.AlterField(
model_name='product', name='price_opt_4', field=models.IntegerField
(default=0, verbose_name='- 11% от 110000')), migrations.AlterField
(model_name='product', name='sex', field=models.CharField(choices=[
('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (
'Унисекс', 'Unisex')], default='Мужское', max_length=10))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('shop', '0032_product_sex')]
operations = [migrations.AddField(model_name='product', name=
'price_ret_sale', field=models.IntegerField(default=0, verbose_name
='Розничная цена, с учетом скидки')), migrations.AddField(
model_name='product', name='size_5xl', field=models.IntegerField(
default=0, verbose_name='5XL размер')), migrations.AddField(
model_name='product', name='size_6xl', field=models.IntegerField(
default=0, verbose_name='6XL размер')), migrations.AlterField(
model_name='product', name='price_opt_2', field=models.IntegerField
(default=0, verbose_name='- 3% от 30000')), migrations.AlterField(
model_name='product', name='price_opt_3', field=models.IntegerField
(default=0, verbose_name='- 7% от 70000')), migrations.AlterField(
model_name='product', name='price_opt_4', field=models.IntegerField
(default=0, verbose_name='- 11% от 110000')), migrations.AlterField
(model_name='product', name='sex', field=models.CharField(choices=[
('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (
'Унисекс', 'Unisex')], default='Мужское', max_length=10))]
<|reserved_special_token_1|>
# Generated by Django 3.1.6 on 2021-07-17 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0032_product_sex'),
]
operations = [
migrations.AddField(
model_name='product',
name='price_ret_sale',
field=models.IntegerField(default=0, verbose_name='Розничная цена, с учетом скидки'),
),
migrations.AddField(
model_name='product',
name='size_5xl',
field=models.IntegerField(default=0, verbose_name='5XL размер'),
),
migrations.AddField(
model_name='product',
name='size_6xl',
field=models.IntegerField(default=0, verbose_name='6XL размер'),
),
migrations.AlterField(
model_name='product',
name='price_opt_2',
field=models.IntegerField(default=0, verbose_name='- 3% от 30000'),
),
migrations.AlterField(
model_name='product',
name='price_opt_3',
field=models.IntegerField(default=0, verbose_name='- 7% от 70000'),
),
migrations.AlterField(
model_name='product',
name='price_opt_4',
field=models.IntegerField(default=0, verbose_name='- 11% от 110000'),
),
migrations.AlterField(
model_name='product',
name='sex',
field=models.CharField(choices=[('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), ('Унисекс', 'Unisex')], default='Мужское', max_length=10),
),
]
|
flexible
|
{
"blob_id": "09660cfcff7d5da0339da201cb18b6f63bec2df9",
"index": 1394,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0032_product_sex')]\n operations = [migrations.AddField(model_name='product', name=\n 'price_ret_sale', field=models.IntegerField(default=0, verbose_name\n ='Розничная цена, с учетом скидки')), migrations.AddField(\n model_name='product', name='size_5xl', field=models.IntegerField(\n default=0, verbose_name='5XL размер')), migrations.AddField(\n model_name='product', name='size_6xl', field=models.IntegerField(\n default=0, verbose_name='6XL размер')), migrations.AlterField(\n model_name='product', name='price_opt_2', field=models.IntegerField\n (default=0, verbose_name='- 3% от 30000')), migrations.AlterField(\n model_name='product', name='price_opt_3', field=models.IntegerField\n (default=0, verbose_name='- 7% от 70000')), migrations.AlterField(\n model_name='product', name='price_opt_4', field=models.IntegerField\n (default=0, verbose_name='- 11% от 110000')), migrations.AlterField\n (model_name='product', name='sex', field=models.CharField(choices=[\n ('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (\n 'Унисекс', 'Unisex')], default='Мужское', max_length=10))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0032_product_sex')]\n operations = [migrations.AddField(model_name='product', name=\n 'price_ret_sale', field=models.IntegerField(default=0, verbose_name\n ='Розничная цена, с учетом скидки')), migrations.AddField(\n model_name='product', name='size_5xl', field=models.IntegerField(\n default=0, verbose_name='5XL размер')), migrations.AddField(\n model_name='product', name='size_6xl', field=models.IntegerField(\n default=0, verbose_name='6XL размер')), migrations.AlterField(\n model_name='product', name='price_opt_2', field=models.IntegerField\n (default=0, verbose_name='- 3% от 30000')), migrations.AlterField(\n model_name='product', name='price_opt_3', field=models.IntegerField\n (default=0, verbose_name='- 7% от 70000')), migrations.AlterField(\n model_name='product', name='price_opt_4', field=models.IntegerField\n (default=0, verbose_name='- 11% от 110000')), migrations.AlterField\n (model_name='product', name='sex', field=models.CharField(choices=[\n ('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (\n 'Унисекс', 'Unisex')], default='Мужское', max_length=10))]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-07-17 10:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0032_product_sex'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='price_ret_sale',\n field=models.IntegerField(default=0, verbose_name='Розничная цена, с учетом скидки'),\n ),\n migrations.AddField(\n model_name='product',\n name='size_5xl',\n field=models.IntegerField(default=0, verbose_name='5XL размер'),\n ),\n migrations.AddField(\n model_name='product',\n name='size_6xl',\n field=models.IntegerField(default=0, verbose_name='6XL размер'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_2',\n field=models.IntegerField(default=0, verbose_name='- 3% от 30000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_3',\n field=models.IntegerField(default=0, verbose_name='- 7% от 70000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_4',\n field=models.IntegerField(default=0, verbose_name='- 11% от 110000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='sex',\n field=models.CharField(choices=[('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), ('Унисекс', 'Unisex')], default='Мужское', max_length=10),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Given two binary strings, return their sum (also a binary string).
#
# For example,
# a = "11"
# b = "1"
# Return "100".
#
# Show Company Tags
# Show Tags
# Show Similar Problems
class Solution(object):
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
max_len = max(len(a), len(b))
a = a.zfill(max_len)
b = b.zfill(max_len)
carry = 0
res = ''
for i in range(max_len - 1, -1, -1):
sums = int(a[i]) + int(b[i]) + carry
if sums < 2:
res += str(sums)
carry = 0
elif sums == 2:
res += '0'
carry = 1
else:
res += '1'
carry = 1
if carry == 1:
res += '1'
return res[::-1]
|
normal
|
{
"blob_id": "9655cba5b459ae8b6812bcebc31cc46e19e52386",
"index": 2741,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n max_len = max(len(a), len(b))\n a = a.zfill(max_len)\n b = b.zfill(max_len)\n carry = 0\n res = ''\n for i in range(max_len - 1, -1, -1):\n sums = int(a[i]) + int(b[i]) + carry\n if sums < 2:\n res += str(sums)\n carry = 0\n elif sums == 2:\n res += '0'\n carry = 1\n else:\n res += '1'\n carry = 1\n if carry == 1:\n res += '1'\n return res[::-1]\n",
"step-4": "# Given two binary strings, return their sum (also a binary string).\n#\n# For example,\n# a = \"11\"\n# b = \"1\"\n# Return \"100\".\n#\n# Show Company Tags\n# Show Tags\n# Show Similar Problems\n\n\nclass Solution(object):\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n max_len = max(len(a), len(b))\n a = a.zfill(max_len)\n b = b.zfill(max_len)\n carry = 0\n res = ''\n for i in range(max_len - 1, -1, -1):\n sums = int(a[i]) + int(b[i]) + carry\n if sums < 2:\n res += str(sums)\n carry = 0\n elif sums == 2:\n res += '0'\n carry = 1\n else:\n res += '1'\n carry = 1\n if carry == 1:\n res += '1'\n return res[::-1]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# [Baekjoon] https://www.acmicpc.net/problem/11053 Longest Increasing Subsequence
# Try solving it recursively first
# Not sure how to do the binary-search approach yet
import sys
N = int(sys.stdin.readline().strip())
A = list(map(int, sys.stdin.readline().split()))
def recur():
if A[i] < A[i-1]:
|
normal
|
{
"blob_id": "afccf460bcf04f38b8c66177c86debd39a1b165f",
"index": 5159,
"step-1": "# [백준] https://www.acmicpc.net/problem/11053 가장 긴 증가하는 부분 수열\n# 일단 재귀식으로 풀어보기\n# 이분탐색 어떻게 할 지 모르겠다\n\nimport sys\n\nN = int(sys.stdin.readline().strip())\nA = list(map(int, sys.stdin.readline().split()))\n\ndef recur():\n\n if A[i] < A[i-1]:\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import base64
import json
from werkzeug.exceptions import Unauthorized
from ab import app
from ab.utils import logger
from ab.plugins.spring import eureka
def _login(username, password):
"""
only for test
:return the access token
"""
try:
logger.info('login as user {username}'.format(username=username))
eureka_client = eureka.get_instance()
login_resp = eureka_client.do_service('GOVBRAIN-AUTHCENTER', '/commonuser/login', method='post',
json={'username': username, 'password': password})
ticket = login_resp['data']['ticket']
if app.config.TESTING:
logger.debug('ticket for user', username, 'is:', ticket)
resp = eureka_client.do_service('GOVBRAIN-AUTHCENTER', '/commonuser/ticket_login?ticket={ticket}'.format(ticket=ticket),
method='get')
if app.config.TESTING:
logger.debug('access_token for user', username, 'is:', resp['data']['access_token'])
return resp['data']['access_token']
except Exception as e:
logger.error('login fail, please check username/password')
raise
def get_current_user(s: str=None, required=True):
"""
get current user by request auth header
:param s:
:return:
{'code': 'SUCCESS', 'nickName': 'gs1', 'appName': '__base__',
'tenantId': '650', 'tenantCode': 'gs', 'userName': 'gs1', 'userId': '10318'}
"""
eureka_client = eureka.get_instance()
s = s or eureka_client.get_auth_token()
if not s:
if required:
raise Unauthorized('login required')
else:
return None
# format not checked
b64encoded = s[7:].split('.')[1]
decoded = base64.urlsafe_b64decode(b64encoded + '===').decode('utf-8')
return json.loads(decoded)['user_info']
|
normal
|
{
"blob_id": "342063b37038c804c2afa78091b1f1c2facbc560",
"index": 3102,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_current_user(s: str=None, required=True):\n \"\"\"\n get current user by request auth header\n :param s:\n :return:\n {'code': 'SUCCESS', 'nickName': 'gs1', 'appName': '__base__',\n 'tenantId': '650', 'tenantCode': 'gs', 'userName': 'gs1', 'userId': '10318'}\n \"\"\"\n eureka_client = eureka.get_instance()\n s = s or eureka_client.get_auth_token()\n if not s:\n if required:\n raise Unauthorized('login required')\n else:\n return None\n b64encoded = s[7:].split('.')[1]\n decoded = base64.urlsafe_b64decode(b64encoded + '===').decode('utf-8')\n return json.loads(decoded)['user_info']\n",
"step-3": "<mask token>\n\n\ndef _login(username, password):\n \"\"\"\n only for test\n :return the access token\n \"\"\"\n try:\n logger.info('login as user {username}'.format(username=username))\n eureka_client = eureka.get_instance()\n login_resp = eureka_client.do_service('GOVBRAIN-AUTHCENTER',\n '/commonuser/login', method='post', json={'username': username,\n 'password': password})\n ticket = login_resp['data']['ticket']\n if app.config.TESTING:\n logger.debug('ticket for user', username, 'is:', ticket)\n resp = eureka_client.do_service('GOVBRAIN-AUTHCENTER',\n '/commonuser/ticket_login?ticket={ticket}'.format(ticket=ticket\n ), method='get')\n if app.config.TESTING:\n logger.debug('access_token for user', username, 'is:', resp[\n 'data']['access_token'])\n return resp['data']['access_token']\n except Exception as e:\n logger.error('login fail, please check username/password')\n raise\n\n\ndef get_current_user(s: str=None, required=True):\n \"\"\"\n get current user by request auth header\n :param s:\n :return:\n {'code': 'SUCCESS', 'nickName': 'gs1', 'appName': '__base__',\n 'tenantId': '650', 'tenantCode': 'gs', 'userName': 'gs1', 'userId': '10318'}\n \"\"\"\n eureka_client = eureka.get_instance()\n s = s or eureka_client.get_auth_token()\n if not s:\n if required:\n raise Unauthorized('login required')\n else:\n return None\n b64encoded = s[7:].split('.')[1]\n decoded = base64.urlsafe_b64decode(b64encoded + '===').decode('utf-8')\n return json.loads(decoded)['user_info']\n",
"step-4": "import base64\nimport json\nfrom werkzeug.exceptions import Unauthorized\nfrom ab import app\nfrom ab.utils import logger\nfrom ab.plugins.spring import eureka\n\n\ndef _login(username, password):\n \"\"\"\n only for test\n :return the access token\n \"\"\"\n try:\n logger.info('login as user {username}'.format(username=username))\n eureka_client = eureka.get_instance()\n login_resp = eureka_client.do_service('GOVBRAIN-AUTHCENTER',\n '/commonuser/login', method='post', json={'username': username,\n 'password': password})\n ticket = login_resp['data']['ticket']\n if app.config.TESTING:\n logger.debug('ticket for user', username, 'is:', ticket)\n resp = eureka_client.do_service('GOVBRAIN-AUTHCENTER',\n '/commonuser/ticket_login?ticket={ticket}'.format(ticket=ticket\n ), method='get')\n if app.config.TESTING:\n logger.debug('access_token for user', username, 'is:', resp[\n 'data']['access_token'])\n return resp['data']['access_token']\n except Exception as e:\n logger.error('login fail, please check username/password')\n raise\n\n\ndef get_current_user(s: str=None, required=True):\n \"\"\"\n get current user by request auth header\n :param s:\n :return:\n {'code': 'SUCCESS', 'nickName': 'gs1', 'appName': '__base__',\n 'tenantId': '650', 'tenantCode': 'gs', 'userName': 'gs1', 'userId': '10318'}\n \"\"\"\n eureka_client = eureka.get_instance()\n s = s or eureka_client.get_auth_token()\n if not s:\n if required:\n raise Unauthorized('login required')\n else:\n return None\n b64encoded = s[7:].split('.')[1]\n decoded = base64.urlsafe_b64decode(b64encoded + '===').decode('utf-8')\n return json.loads(decoded)['user_info']\n",
"step-5": "import base64\nimport json\n\nfrom werkzeug.exceptions import Unauthorized\n\nfrom ab import app\n\nfrom ab.utils import logger\nfrom ab.plugins.spring import eureka\n\n\ndef _login(username, password):\n \"\"\"\n only for test\n :return the access token\n \"\"\"\n try:\n logger.info('login as user {username}'.format(username=username))\n eureka_client = eureka.get_instance()\n\n login_resp = eureka_client.do_service('GOVBRAIN-AUTHCENTER', '/commonuser/login', method='post',\n json={'username': username, 'password': password})\n ticket = login_resp['data']['ticket']\n if app.config.TESTING:\n logger.debug('ticket for user', username, 'is:', ticket)\n\n resp = eureka_client.do_service('GOVBRAIN-AUTHCENTER', '/commonuser/ticket_login?ticket={ticket}'.format(ticket=ticket),\n method='get')\n if app.config.TESTING:\n logger.debug('access_token for user', username, 'is:', resp['data']['access_token'])\n return resp['data']['access_token']\n except Exception as e:\n logger.error('login fail, please check username/password')\n raise\n\n\ndef get_current_user(s: str=None, required=True):\n \"\"\"\n get current user by request auth header\n :param s:\n :return:\n {'code': 'SUCCESS', 'nickName': 'gs1', 'appName': '__base__',\n 'tenantId': '650', 'tenantCode': 'gs', 'userName': 'gs1', 'userId': '10318'}\n \"\"\"\n eureka_client = eureka.get_instance()\n s = s or eureka_client.get_auth_token()\n if not s:\n if required:\n raise Unauthorized('login required')\n else:\n return None\n # format not checked\n b64encoded = s[7:].split('.')[1]\n decoded = base64.urlsafe_b64decode(b64encoded + '===').decode('utf-8')\n return json.loads(decoded)['user_info']\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.views.generic import (ListView, DetailView, CreateView,
DeleteView, UpdateView, TemplateView)
from django.views.generic.edit import ModelFormMixin
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from .models import Movie, Actor
from .forms import MovieForm
from django.http import Http404
def my_print(*args, **kwargs):
raise Http404(*args, **kwargs)
class BaseModelApi(TemplateView, ModelFormMixin):
def get_template_names(self):
prefix = self.request.method
if prefix in ['PUT', 'PATCH', 'POST']:
prefix = 'form'
name = self.model
return [f'{name}/{name}_{prefix}.html']
def get(self, request):
pass
def post(self, request):
pass
def put(self, request):
pass
def patch(self, request):
pass
def delete(self, request):
pass
def dispatch(self, request):
pass
def get_context_data(self):
pass
def get_form(self):
pass
def get_form_class(self):
name = f'{self.model}'.title()
# prefix = f'{self.request.method}'.title()
self.form_class = eval(f'{name}Form')
return self.form_class
class MoviesView(ListView):
model = Movie
context_object_name = 'movies'
class MovieView(DetailView):
model = Movie
context_object_name = 'movie'
class ActorView(DetailView):
model = Actor
context_object_name = 'actor'
@method_decorator(login_required, name='dispatch')
class MovieCreateView(CreateView):
form_class = MovieForm
template_name = 'movies/movie_form.html'
success_url = reverse_lazy('movie_all')
@method_decorator(login_required, name='dispatch')
class MovieUpdateView(UpdateView):
model = Movie
form_class = MovieForm
template_name = 'movies/movie_form.html'
success_url = reverse_lazy('movie_all')
@method_decorator(login_required, name='dispatch')
class MovieDelete(DeleteView):
model = Movie
success_url = reverse_lazy('movie_all')
|
normal
|
{
"blob_id": "a63e5186c0eb8b5ae8510b473168db3461166513",
"index": 7784,
"step-1": "<mask token>\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-2": "<mask token>\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n <mask token>\n <mask token>\n <mask token>\n\n def patch(self, request):\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-3": "<mask token>\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n\n def get(self, request):\n pass\n\n def post(self, request):\n pass\n <mask token>\n\n def patch(self, request):\n pass\n <mask token>\n\n def dispatch(self, request):\n pass\n\n def get_context_data(self):\n pass\n\n def get_form(self):\n pass\n\n def get_form_class(self):\n name = f'{self.model}'.title()\n self.form_class = eval(f'{name}Form')\n return self.form_class\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-4": "from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView, TemplateView\nfrom django.views.generic.edit import ModelFormMixin\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Movie, Actor\nfrom .forms import MovieForm\nfrom django.http import Http404\n\n\ndef my_print(*args, **kwargs):\n raise Http404(*args, **kwargs)\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n\n def get(self, request):\n pass\n\n def post(self, request):\n pass\n\n def put(self, request):\n pass\n\n def patch(self, request):\n pass\n\n def delete(self, request):\n pass\n\n def dispatch(self, request):\n pass\n\n def get_context_data(self):\n pass\n\n def get_form(self):\n pass\n\n def get_form_class(self):\n name = f'{self.model}'.title()\n self.form_class = eval(f'{name}Form')\n return self.form_class\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-5": "from django.views.generic import (ListView, DetailView, CreateView,\n DeleteView, UpdateView, TemplateView)\nfrom django.views.generic.edit import ModelFormMixin\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import Movie, Actor\nfrom .forms import MovieForm\nfrom django.http import Http404\n\n\ndef my_print(*args, **kwargs):\n raise Http404(*args, **kwargs)\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n\n def get(self, request):\n pass\n\n def post(self, request):\n pass\n\n def put(self, request):\n pass\n\n def patch(self, request):\n pass\n\n def delete(self, request):\n pass\n\n def dispatch(self, request):\n pass\n\n def get_context_data(self):\n pass\n\n def get_form(self):\n pass\n\n def get_form_class(self):\n name = f'{self.model}'.title()\n # prefix = f'{self.request.method}'.title()\n self.form_class = eval(f'{name}Form')\n return self.form_class\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-ids": [
14,
15,
21,
25,
26
]
}
|
[
14,
15,
21,
25,
26
] |
<|reserved_special_token_0|>
class Env:
<|reserved_special_token_0|>
def __init__(self, objective):
"""
        Objective is wp/adp/logadp. It indicates whether bombs are
        considered in the reward calculation. Here, we use dummy agents.
        This is because, in the original game, the players
        are `in` the game. Here, we want to isolate
        players and environments to have a more gym-style
interface. To achieve this, we use dummy players
to play. For each move, we tell the corresponding
dummy player which action to play, then the player
will perform the actual action in the game engine.
"""
self.objective = objective
self.players = {}
for position in ['landlord', 'landlord_up', 'landlord_down']:
self.players[position] = DummyAgent(position)
self._env = GameEnv(self.players)
self.total_round = 0
self.force_bid = 0
self.infoset = None
def reset(self, model, device, flags=None):
"""
Every time reset is called, the environment
will be re-initialized with a new deck of cards.
This function is usually called when a game is over.
"""
self._env.reset()
if model is None:
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[
20:37], 'landlord_down': _deck[37:54],
'three_landlord_cards': _deck[17:20]}
for key in card_play_data:
card_play_data[key].sort()
self._env.card_play_init(card_play_data)
self.infoset = self._game_infoset
return get_obs(self.infoset)
else:
self.total_round += 1
bid_done = False
card_play_data = []
landlord_cards = []
last_bid = 0
bid_count = 0
player_ids = {}
bid_info = None
bid_obs_buffer = []
multiply_obs_buffer = []
bid_limit = 3
force_bid = False
while not bid_done:
bid_limit -= 1
bid_obs_buffer.clear()
multiply_obs_buffer.clear()
_deck = deck.copy()
np.random.shuffle(_deck)
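                # deal 17 cards to each of the three players; the remaining 3 are the landlord's bonus cards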
card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]
for i in range(3):
card_play_data[i].sort()
landlord_cards = _deck[51:54]
landlord_cards.sort()
bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -
1], [-1, -1, -1]])
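                # a randomly chosen player opens the bidding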
bidding_player = random.randint(0, 2)
first_bid = -1
last_bid = -1
bid_count = 0
if bid_limit <= 0:
force_bid = True
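                # each of the three players takes one turn to bid (action 1) or pass (action 0)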
for r in range(3):
bidding_obs = _get_obs_for_bid(bidding_player, bid_info,
card_play_data[bidding_player])
with torch.no_grad():
action = model.forward('bidding', torch.tensor(
bidding_obs['z_batch'], device=device), torch.
tensor(bidding_obs['x_batch'], device=device),
flags=flags)
if bid_limit <= 0:
wr = BidModel.predict_env(card_play_data[
bidding_player])
if wr >= 0.7:
action = {'action': 1}
bid_limit += 1
bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'
][action['action']], 'z_batch': bidding_obs[
'z_batch'][action['action']], 'pid': bidding_player})
if action['action'] == 1:
last_bid = bidding_player
bid_count += 1
if first_bid == -1:
first_bid = bidding_player
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
else:
bid_info[r] = [0, 0, 0]
bidding_player = (bidding_player + 1) % 3
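                # no bids at all: reshuffle and redeal; more than one bid: the first bidder gets a final call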
one_count = np.count_nonzero(bid_info == 1)
if one_count == 0:
continue
elif one_count > 1:
r = 3
bidding_player = first_bid
bidding_obs = _get_obs_for_bid(bidding_player, bid_info,
card_play_data[bidding_player])
with torch.no_grad():
action = model.forward('bidding', torch.tensor(
bidding_obs['z_batch'], device=device), torch.
tensor(bidding_obs['x_batch'], device=device),
flags=flags)
bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'
][action['action']], 'z_batch': bidding_obs[
'z_batch'][action['action']], 'pid': bidding_player})
if action['action'] == 1:
last_bid = bidding_player
bid_count += 1
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
break
card_play_data[last_bid].extend(landlord_cards)
card_play_data = {'landlord': card_play_data[last_bid],
'landlord_up': card_play_data[(last_bid - 1) % 3],
'landlord_down': card_play_data[(last_bid + 1) % 3],
'three_landlord_cards': landlord_cards}
card_play_data['landlord'].sort()
player_ids = {'landlord': last_bid, 'landlord_up': (last_bid -
1) % 3, 'landlord_down': (last_bid + 1) % 3}
player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):
'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}
for bid_obs in bid_obs_buffer:
bid_obs.update({'position': player_positions[bid_obs['pid']]})
self._env.card_play_init(card_play_data)
multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.
array([0, 0, 1])]
for pos in ['landlord', 'landlord_up', 'landlord_down']:
pid = player_ids[pos]
self._env.info_sets[pos].player_id = pid
self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %
3, pid, (pid + 1) % 3]]
self._env.bid_count = bid_count
action = {'action': 0}
self._env.info_sets[pos].multiply_info = multiply_map[action
['action']]
self._env.multiply_count[pos] = action['action']
self.infoset = self._game_infoset
if force_bid:
self.force_bid += 1
if self.total_round % 100 == 0:
print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.
total_round, self.force_bid / self.total_round * 100))
self.force_bid = 0
self.total_round = 0
return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,
'multiply_obs_buffer': multiply_obs_buffer}
<|reserved_special_token_0|>
def _get_reward(self, pos):
"""
        This function is called at the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
self_bomb_num = self._env.pos_bomb_num[pos]
if winner == 'landlord':
if self.objective == 'adp':
return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num
+ self._env.multiply_count[pos]) / 8
elif self.objective == 'logadp':
return (1.0 - self._env.step_count * 0.0033
) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[
pos] / 4
else:
return 1.0 - self._env.step_count * 0.0033
elif self.objective == 'adp':
return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +
self._env.multiply_count[pos]) / 8
elif self.objective == 'logadp':
return (-1.0 + self._env.step_count * 0.0033
) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos
] / 4
else:
return -1.0 + self._env.step_count * 0.0033
def _get_reward_bidding(self, pos):
"""
        This function is called at the end of each game. It returns the
        bidding reward, +/-(2 ** (bid_count - 1)) / 8, so each additional
        bid doubles the magnitude.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
if winner == 'landlord':
return 1.0 * 2 ** (self._env.bid_count - 1) / 8
else:
return -1.0 * 2 ** (self._env.bid_count - 1) / 8
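    # Worked example for the bidding reward above: with bid_count == 2 and a
    # landlord win, the reward is 1.0 * 2 ** (2 - 1) / 8 = 0.25 (and -0.25 on
    # a loss); every additional bid doubles the magnitude.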
@property
def _game_infoset(self):
"""
        Here, the infoset is defined as all the information
        in the current situation, including the hand cards
        of all the players, all the historical moves, etc.
        That is, it contains perfect information. Later,
        we will use functions to extract the observable
        information from the views of the three players.
"""
return self._env.game_infoset
@property
def _game_bomb_num(self):
"""
The number of bombs played so far. This is used as
a feature of the neural network and is also used to
calculate ADP.
"""
return self._env.get_bomb_num()
@property
def _game_winner(self):
""" A string of landlord/peasants
"""
return self._env.get_winner()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DummyAgent(object):
"""
    Dummy agent is designed to easily interact with the
    game engine. The agent will first be told what action
    to perform. Then the environment will call this agent
    to perform the actual action. This helps us isolate the
    environment and agents, moving toward a gym-like
    interface.
"""
def __init__(self, position):
self.position = position
self.action = None
def act(self, infoset):
"""
Simply return the action that is set previously.
"""
assert self.action in infoset.legal_actions
return self.action
def set_action(self, action):
"""
The environment uses this function to tell
the dummy agent what to do.
"""
self.action = action
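# Usage sketch for DummyAgent (illustrative only; `chosen_action` and `infoset`
# are placeholders for a legal move and the current infoset):
#
#   agent = DummyAgent('landlord')
#   agent.set_action(chosen_action)
#   assert agent.act(infoset) is chosen_action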
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Env:
"""
Doudizhu multi-agent wrapper
"""
def __init__(self, objective):
"""
        Objective is wp/adp/logadp. It indicates whether bombs are
        considered in the reward calculation. Here, we use dummy agents.
        This is because, in the original game, the players
        are `in` the game. Here, we want to isolate
        players and environments to have a more gym-style
        interface. To achieve this, we use dummy players
        to play. For each move, we tell the corresponding
        dummy player which action to play, then the player
        will perform the actual action in the game engine.
"""
self.objective = objective
self.players = {}
for position in ['landlord', 'landlord_up', 'landlord_down']:
self.players[position] = DummyAgent(position)
self._env = GameEnv(self.players)
self.total_round = 0
self.force_bid = 0
self.infoset = None
def reset(self, model, device, flags=None):
"""
Every time reset is called, the environment
will be re-initialized with a new deck of cards.
This function is usually called when a game is over.
"""
self._env.reset()
if model is None:
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[
20:37], 'landlord_down': _deck[37:54],
'three_landlord_cards': _deck[17:20]}
for key in card_play_data:
card_play_data[key].sort()
self._env.card_play_init(card_play_data)
self.infoset = self._game_infoset
return get_obs(self.infoset)
else:
self.total_round += 1
bid_done = False
card_play_data = []
landlord_cards = []
last_bid = 0
bid_count = 0
player_ids = {}
bid_info = None
bid_obs_buffer = []
multiply_obs_buffer = []
bid_limit = 3
force_bid = False
while not bid_done:
bid_limit -= 1
bid_obs_buffer.clear()
multiply_obs_buffer.clear()
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]
for i in range(3):
card_play_data[i].sort()
landlord_cards = _deck[51:54]
landlord_cards.sort()
bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -
1], [-1, -1, -1]])
bidding_player = random.randint(0, 2)
first_bid = -1
last_bid = -1
bid_count = 0
if bid_limit <= 0:
force_bid = True
for r in range(3):
bidding_obs = _get_obs_for_bid(bidding_player, bid_info,
card_play_data[bidding_player])
with torch.no_grad():
action = model.forward('bidding', torch.tensor(
bidding_obs['z_batch'], device=device), torch.
tensor(bidding_obs['x_batch'], device=device),
flags=flags)
if bid_limit <= 0:
wr = BidModel.predict_env(card_play_data[
bidding_player])
if wr >= 0.7:
action = {'action': 1}
bid_limit += 1
bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'
][action['action']], 'z_batch': bidding_obs[
'z_batch'][action['action']], 'pid': bidding_player})
if action['action'] == 1:
last_bid = bidding_player
bid_count += 1
if first_bid == -1:
first_bid = bidding_player
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
else:
bid_info[r] = [0, 0, 0]
bidding_player = (bidding_player + 1) % 3
one_count = np.count_nonzero(bid_info == 1)
if one_count == 0:
continue
elif one_count > 1:
r = 3
bidding_player = first_bid
bidding_obs = _get_obs_for_bid(bidding_player, bid_info,
card_play_data[bidding_player])
with torch.no_grad():
action = model.forward('bidding', torch.tensor(
bidding_obs['z_batch'], device=device), torch.
tensor(bidding_obs['x_batch'], device=device),
flags=flags)
bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'
][action['action']], 'z_batch': bidding_obs[
'z_batch'][action['action']], 'pid': bidding_player})
if action['action'] == 1:
last_bid = bidding_player
bid_count += 1
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
break
card_play_data[last_bid].extend(landlord_cards)
card_play_data = {'landlord': card_play_data[last_bid],
'landlord_up': card_play_data[(last_bid - 1) % 3],
'landlord_down': card_play_data[(last_bid + 1) % 3],
'three_landlord_cards': landlord_cards}
card_play_data['landlord'].sort()
player_ids = {'landlord': last_bid, 'landlord_up': (last_bid -
1) % 3, 'landlord_down': (last_bid + 1) % 3}
player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):
'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}
for bid_obs in bid_obs_buffer:
bid_obs.update({'position': player_positions[bid_obs['pid']]})
self._env.card_play_init(card_play_data)
multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.
array([0, 0, 1])]
for pos in ['landlord', 'landlord_up', 'landlord_down']:
pid = player_ids[pos]
self._env.info_sets[pos].player_id = pid
self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %
3, pid, (pid + 1) % 3]]
self._env.bid_count = bid_count
action = {'action': 0}
self._env.info_sets[pos].multiply_info = multiply_map[action
['action']]
self._env.multiply_count[pos] = action['action']
self.infoset = self._game_infoset
if force_bid:
self.force_bid += 1
if self.total_round % 100 == 0:
                print('Forced-bid deals: %i/%i %.1f%%' % (self.force_bid, self.
total_round, self.force_bid / self.total_round * 100))
self.force_bid = 0
self.total_round = 0
return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,
'multiply_obs_buffer': multiply_obs_buffer}
def step(self, action):
"""
Step function takes as input the action, which
        is a list of integers, and outputs the next observation,
reward, and a Boolean variable indicating whether the
current game is finished. It also returns an empty
dictionary that is reserved to pass useful information.
"""
assert action in self.infoset.legal_actions
self.players[self._acting_player_position].set_action(action)
self._env.step()
self.infoset = self._game_infoset
done = False
reward = 0.0
if self._game_over:
done = True
reward = {'play': {'landlord': self._get_reward('landlord'),
'landlord_up': self._get_reward('landlord_up'),
'landlord_down': self._get_reward('landlord_down')}, 'bid':
{'landlord': self._get_reward_bidding('landlord') * 2,
'landlord_up': self._get_reward_bidding('landlord_up'),
'landlord_down': self._get_reward_bidding('landlord_down')}}
obs = None
else:
obs = get_obs(self.infoset)
return obs, reward, done, {}
def _get_reward(self, pos):
"""
        This function is called at the end of each game. For the plain
        win/loss objective it returns roughly +1/-1 with a small per-step
        discount; for 'adp' and 'logadp' the reward is additionally scaled
        by the bombs played and the multiply count.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
self_bomb_num = self._env.pos_bomb_num[pos]
if winner == 'landlord':
if self.objective == 'adp':
return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num
+ self._env.multiply_count[pos]) / 8
elif self.objective == 'logadp':
return (1.0 - self._env.step_count * 0.0033
) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[
pos] / 4
else:
return 1.0 - self._env.step_count * 0.0033
elif self.objective == 'adp':
return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +
self._env.multiply_count[pos]) / 8
elif self.objective == 'logadp':
return (-1.0 + self._env.step_count * 0.0033
) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos
] / 4
else:
return -1.0 + self._env.step_count * 0.0033
def _get_reward_bidding(self, pos):
"""
        This function is called at the end of each game. It returns the
        bidding reward, +/-(2 ** (bid_count - 1)) / 8, so each additional
        bid doubles the magnitude.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
if winner == 'landlord':
return 1.0 * 2 ** (self._env.bid_count - 1) / 8
else:
return -1.0 * 2 ** (self._env.bid_count - 1) / 8
@property
def _game_infoset(self):
"""
        Here, the infoset is defined as all the information
        in the current situation, including the hand cards
        of all the players, all the historical moves, etc.
        That is, it contains perfect information. Later,
        we will use functions to extract the observable
        information from the views of the three players.
"""
return self._env.game_infoset
@property
def _game_bomb_num(self):
"""
The number of bombs played so far. This is used as
a feature of the neural network and is also used to
calculate ADP.
"""
return self._env.get_bomb_num()
@property
def _game_winner(self):
""" A string of landlord/peasants
"""
return self._env.get_winner()
@property
def _acting_player_position(self):
"""
The player that is active. It can be landlord,
        landlord_down, or landlord_up.
"""
return self._env.acting_player_position
@property
def _game_over(self):
""" Returns a Boolean
"""
return self._env.game_over
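# Minimal interaction sketch for the Env wrapper above. `model`, `device`,
# `flags` and the `pick` policy are assumptions supplied by the caller; they
# are not defined in this module:
#
#   env = Env(objective='adp')
#   obs, buffers = env.reset(model, device, flags=flags)
#   done = False
#   while not done:
#       action = pick(obs['legal_actions'])      # any legal move is acceptable
#       obs, reward, done, _ = env.step(action)
#   # once done, `reward` is a dict with per-position 'play' and 'bid' entries.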
class DummyAgent(object):
"""
    Dummy agent is designed to easily interact with the
    game engine. The agent will first be told what action
    to perform. Then the environment will call this agent
    to perform the actual action. This helps us isolate the
    environment and agents, moving toward a gym-like
    interface.
"""
def __init__(self, position):
self.position = position
self.action = None
def act(self, infoset):
"""
Simply return the action that is set previously.
"""
assert self.action in infoset.legal_actions
return self.action
def set_action(self, action):
"""
The environment uses this function to tell
the dummy agent what to do.
"""
self.action = action
def get_obs(infoset, use_general=True):
"""
This function obtains observations with imperfect information
from the infoset. It has three branches since we encode
different features for different positions.
    This function will return a dictionary named `obs`. It contains
several fields. These fields will be used to train the model.
One can play with those features to improve the performance.
`position` is a string that can be landlord/landlord_down/landlord_up
    `x_batch` is a batch of features (excluding the historical moves).
It also encodes the action feature
    `z_batch` is a batch of features with historical moves only.
`legal_actions` is the legal moves
    `x_no_action`: the features (excluding the historical moves and
the action features). It does not have the batch dim.
`z`: same as z_batch but not a batch.
"""
if use_general:
if infoset.player_position not in ['landlord', 'landlord_up',
'landlord_down']:
raise ValueError('')
return _get_obs_general(infoset, infoset.player_position)
elif infoset.player_position == 'landlord':
return _get_obs_landlord(infoset)
elif infoset.player_position == 'landlord_up':
return _get_obs_landlord_up(infoset)
elif infoset.player_position == 'landlord_down':
return _get_obs_landlord_down(infoset)
else:
raise ValueError('')
<|reserved_special_token_0|>
def _cards2array(list_cards):
"""
    A utility function that transforms the actions, i.e.,
    a list of integers, into a card matrix. Here we remove
    the six entries that are always zero and flatten
    the representation.
"""
if len(list_cards) == 0:
return np.zeros(54, dtype=np.int8)
matrix = np.zeros([4, 13], dtype=np.int8)
jokers = np.zeros(2, dtype=np.int8)
counter = Counter(list_cards)
for card, num_times in counter.items():
if card < 20:
matrix[:, Card2Column[card]] = NumOnes2Array[num_times]
elif card == 20:
jokers[0] = 1
elif card == 30:
jokers[1] = 1
return np.concatenate((matrix.flatten('F'), jokers))
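# Example of the 54-dim encoding above, assuming the usual DouZero mappings
# (Card2Column sends rank 3 to column 0, 4 to column 1, and so on;
# NumOnes2Array maps a count k to a column with k ones; neither table is
# shown in this file):
#
#   _cards2array([3, 3, 20])
#   # -> entries 0 and 1 are set (two 3s in the first column, flattened
#   #    column-major), entry 52 flags the black joker, entry 53 stays 0.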
<|reserved_special_token_0|>
def _process_action_seq(sequence, length=15, new_model=True):
"""
    A utility function encoding historical moves. We
    encode the last `length` moves (15 by default). If there are
    fewer moves, we pad with empty moves (encoded as zeros).
"""
sequence = sequence[-length:].copy()
if new_model:
sequence = sequence[::-1]
if len(sequence) < length:
empty_sequence = [[] for _ in range(length - len(sequence))]
empty_sequence.extend(sequence)
sequence = empty_sequence
return sequence
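# Example of the padding/ordering above: with length=4 and new_model=True, a
# history of two moves [[3], [4, 4]] becomes [[], [], [4, 4], [3]] (most recent
# move first, padded at the front); with new_model=False the kept moves stay in
# their original order: [[], [], [3], [4, 4]].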
def _get_one_hot_bomb(bomb_num):
"""
A utility function to encode the number of bombs
into one-hot representation.
"""
one_hot = np.zeros(15)
one_hot[bomb_num] = 1
return one_hot
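# Example: _get_one_hot_bomb(2) returns a length-15 vector with a single 1 at
# index 2, i.e. [0, 0, 1, 0, ..., 0].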
<|reserved_special_token_0|>
def _get_obs_landlord_up(infoset):
"""
    Obtain the landlord_up features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,
:], num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']
)
last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,
:], num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])
teammate_played_cards_batch = np.repeat(teammate_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
landlord_played_cards_batch, teammate_played_cards_batch,
last_action_batch, last_landlord_action_batch,
last_teammate_action_batch, landlord_num_cards_left_batch,
teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards,
landlord_played_cards, teammate_played_cards, last_action,
last_landlord_action, last_teammate_action, landlord_num_cards_left,
teammate_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
def _get_obs_landlord_down(infoset):
"""
    Obtain the landlord_down features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,
:], num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])
last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,
:], num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])
teammate_played_cards_batch = np.repeat(teammate_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
landlord_played_cards_batch, teammate_played_cards_batch,
last_action_batch, last_landlord_action_batch,
last_teammate_action_batch, landlord_num_cards_left_batch,
teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards,
landlord_played_cards, teammate_played_cards, last_action,
last_landlord_action, last_teammate_action, landlord_num_cards_left,
teammate_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.
float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':
infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8)}
return obs
<|reserved_special_token_0|>
def _get_obs_general(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],
'landlord_down': [0, 0, 1]}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,
axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
other_handcards_left_list = []
    for pos in ['landlord', 'landlord_up', 'landlord_down']:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
num_cards_left = np.hstack((landlord_num_cards_left,
landlord_up_num_cards_left, landlord_down_num_cards_left))
x_batch = np.hstack((bid_info_batch, multiply_info_batch))
x_no_action = np.hstack((bid_info, multiply_info))
z = np.vstack((num_cards_left, my_handcards, other_handcards,
three_landlord_cards, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
_action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 32))))
_z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
my_action_batch = my_action_batch[:, np.newaxis, :]
z_batch = np.zeros([len(_z_batch), 40, 54], int)
for i in range(0, len(_z_batch)):
z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))
obs = {'position': position, 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Env:
"""
Doudizhu multi-agent wrapper
"""
def __init__(self, objective):
"""
        Objective is wp/adp/logadp. It indicates whether bombs are
        considered in the reward calculation. Here, we use dummy agents.
        This is because, in the original game, the players
        are `in` the game. Here, we want to isolate
        players and environments to have a more gym-style
        interface. To achieve this, we use dummy players
        to play. For each move, we tell the corresponding
        dummy player which action to play, then the player
        will perform the actual action in the game engine.
"""
self.objective = objective
self.players = {}
for position in ['landlord', 'landlord_up', 'landlord_down']:
self.players[position] = DummyAgent(position)
self._env = GameEnv(self.players)
self.total_round = 0
self.force_bid = 0
self.infoset = None
def reset(self, model, device, flags=None):
"""
Every time reset is called, the environment
will be re-initialized with a new deck of cards.
This function is usually called when a game is over.
"""
self._env.reset()
if model is None:
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[
20:37], 'landlord_down': _deck[37:54],
'three_landlord_cards': _deck[17:20]}
for key in card_play_data:
card_play_data[key].sort()
self._env.card_play_init(card_play_data)
self.infoset = self._game_infoset
return get_obs(self.infoset)
else:
self.total_round += 1
bid_done = False
card_play_data = []
landlord_cards = []
last_bid = 0
bid_count = 0
player_ids = {}
bid_info = None
bid_obs_buffer = []
multiply_obs_buffer = []
bid_limit = 3
force_bid = False
while not bid_done:
bid_limit -= 1
bid_obs_buffer.clear()
multiply_obs_buffer.clear()
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]
for i in range(3):
card_play_data[i].sort()
landlord_cards = _deck[51:54]
landlord_cards.sort()
bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -
1], [-1, -1, -1]])
bidding_player = random.randint(0, 2)
first_bid = -1
last_bid = -1
bid_count = 0
if bid_limit <= 0:
force_bid = True
for r in range(3):
bidding_obs = _get_obs_for_bid(bidding_player, bid_info,
card_play_data[bidding_player])
with torch.no_grad():
action = model.forward('bidding', torch.tensor(
bidding_obs['z_batch'], device=device), torch.
tensor(bidding_obs['x_batch'], device=device),
flags=flags)
if bid_limit <= 0:
wr = BidModel.predict_env(card_play_data[
bidding_player])
if wr >= 0.7:
action = {'action': 1}
bid_limit += 1
bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'
][action['action']], 'z_batch': bidding_obs[
'z_batch'][action['action']], 'pid': bidding_player})
if action['action'] == 1:
last_bid = bidding_player
bid_count += 1
if first_bid == -1:
first_bid = bidding_player
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
else:
bid_info[r] = [0, 0, 0]
bidding_player = (bidding_player + 1) % 3
one_count = np.count_nonzero(bid_info == 1)
if one_count == 0:
continue
elif one_count > 1:
r = 3
bidding_player = first_bid
bidding_obs = _get_obs_for_bid(bidding_player, bid_info,
card_play_data[bidding_player])
with torch.no_grad():
action = model.forward('bidding', torch.tensor(
bidding_obs['z_batch'], device=device), torch.
tensor(bidding_obs['x_batch'], device=device),
flags=flags)
bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'
][action['action']], 'z_batch': bidding_obs[
'z_batch'][action['action']], 'pid': bidding_player})
if action['action'] == 1:
last_bid = bidding_player
bid_count += 1
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
break
card_play_data[last_bid].extend(landlord_cards)
card_play_data = {'landlord': card_play_data[last_bid],
'landlord_up': card_play_data[(last_bid - 1) % 3],
'landlord_down': card_play_data[(last_bid + 1) % 3],
'three_landlord_cards': landlord_cards}
card_play_data['landlord'].sort()
player_ids = {'landlord': last_bid, 'landlord_up': (last_bid -
1) % 3, 'landlord_down': (last_bid + 1) % 3}
player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):
'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}
for bid_obs in bid_obs_buffer:
bid_obs.update({'position': player_positions[bid_obs['pid']]})
self._env.card_play_init(card_play_data)
multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.
array([0, 0, 1])]
for pos in ['landlord', 'landlord_up', 'landlord_down']:
pid = player_ids[pos]
self._env.info_sets[pos].player_id = pid
self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %
3, pid, (pid + 1) % 3]]
self._env.bid_count = bid_count
action = {'action': 0}
self._env.info_sets[pos].multiply_info = multiply_map[action
['action']]
self._env.multiply_count[pos] = action['action']
self.infoset = self._game_infoset
if force_bid:
self.force_bid += 1
if self.total_round % 100 == 0:
                print('Forced-bid deals: %i/%i %.1f%%' % (self.force_bid, self.
total_round, self.force_bid / self.total_round * 100))
self.force_bid = 0
self.total_round = 0
return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,
'multiply_obs_buffer': multiply_obs_buffer}
def step(self, action):
"""
Step function takes as input the action, which
        is a list of integers, and outputs the next observation,
reward, and a Boolean variable indicating whether the
current game is finished. It also returns an empty
dictionary that is reserved to pass useful information.
"""
assert action in self.infoset.legal_actions
self.players[self._acting_player_position].set_action(action)
self._env.step()
self.infoset = self._game_infoset
done = False
reward = 0.0
if self._game_over:
done = True
reward = {'play': {'landlord': self._get_reward('landlord'),
'landlord_up': self._get_reward('landlord_up'),
'landlord_down': self._get_reward('landlord_down')}, 'bid':
{'landlord': self._get_reward_bidding('landlord') * 2,
'landlord_up': self._get_reward_bidding('landlord_up'),
'landlord_down': self._get_reward_bidding('landlord_down')}}
obs = None
else:
obs = get_obs(self.infoset)
return obs, reward, done, {}
def _get_reward(self, pos):
"""
        This function is called at the end of each game. For the plain
        win/loss objective it returns roughly +1/-1 with a small per-step
        discount; for 'adp' and 'logadp' the reward is additionally scaled
        by the bombs played and the multiply count.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
self_bomb_num = self._env.pos_bomb_num[pos]
if winner == 'landlord':
if self.objective == 'adp':
return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num
+ self._env.multiply_count[pos]) / 8
elif self.objective == 'logadp':
return (1.0 - self._env.step_count * 0.0033
) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[
pos] / 4
else:
return 1.0 - self._env.step_count * 0.0033
elif self.objective == 'adp':
return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +
self._env.multiply_count[pos]) / 8
elif self.objective == 'logadp':
return (-1.0 + self._env.step_count * 0.0033
) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos
] / 4
else:
return -1.0 + self._env.step_count * 0.0033
def _get_reward_bidding(self, pos):
"""
        This function is called at the end of each game. It returns the
        bidding reward, +/-(2 ** (bid_count - 1)) / 8, so each additional
        bid doubles the magnitude.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
if winner == 'landlord':
return 1.0 * 2 ** (self._env.bid_count - 1) / 8
else:
return -1.0 * 2 ** (self._env.bid_count - 1) / 8
@property
def _game_infoset(self):
"""
        Here, the infoset is defined as all the information
        in the current situation, including the hand cards
        of all the players, all the historical moves, etc.
        That is, it contains perfect information. Later,
        we will use functions to extract the observable
        information from the views of the three players.
"""
return self._env.game_infoset
@property
def _game_bomb_num(self):
"""
The number of bombs played so far. This is used as
a feature of the neural network and is also used to
calculate ADP.
"""
return self._env.get_bomb_num()
@property
def _game_winner(self):
""" A string of landlord/peasants
"""
return self._env.get_winner()
@property
def _acting_player_position(self):
"""
The player that is active. It can be landlord,
        landlord_down, or landlord_up.
"""
return self._env.acting_player_position
@property
def _game_over(self):
""" Returns a Boolean
"""
return self._env.game_over
class DummyAgent(object):
"""
    Dummy agent is designed to easily interact with the
    game engine. The agent will first be told what action
    to perform. Then the environment will call this agent
    to perform the actual action. This helps us isolate the
    environment and agents, moving toward a gym-like
    interface.
"""
def __init__(self, position):
self.position = position
self.action = None
def act(self, infoset):
"""
Simply return the action that is set previously.
"""
assert self.action in infoset.legal_actions
return self.action
def set_action(self, action):
"""
The environment uses this function to tell
the dummy agent what to do.
"""
self.action = action
def get_obs(infoset, use_general=True):
"""
This function obtains observations with imperfect information
from the infoset. It has three branches since we encode
different features for different positions.
    This function will return a dictionary named `obs`. It contains
several fields. These fields will be used to train the model.
One can play with those features to improve the performance.
`position` is a string that can be landlord/landlord_down/landlord_up
    `x_batch` is a batch of features (excluding the historical moves).
It also encodes the action feature
    `z_batch` is a batch of features with historical moves only.
`legal_actions` is the legal moves
    `x_no_action`: the features (excluding the historical moves and
the action features). It does not have the batch dim.
`z`: same as z_batch but not a batch.
"""
if use_general:
if infoset.player_position not in ['landlord', 'landlord_up',
'landlord_down']:
raise ValueError('')
return _get_obs_general(infoset, infoset.player_position)
elif infoset.player_position == 'landlord':
return _get_obs_landlord(infoset)
elif infoset.player_position == 'landlord_up':
return _get_obs_landlord_up(infoset)
elif infoset.player_position == 'landlord_down':
return _get_obs_landlord_down(infoset)
else:
raise ValueError('')
def _get_one_hot_array(num_left_cards, max_num_cards):
"""
    A utility function to obtain a one-hot encoding
"""
one_hot = np.zeros(max_num_cards)
if num_left_cards > 0:
one_hot[num_left_cards - 1] = 1
return one_hot
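# Example: _get_one_hot_array(17, 20) sets index 16 of a length-20 vector,
# while _get_one_hot_array(0, 20) returns all zeros (no cards left).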
def _cards2array(list_cards):
"""
    A utility function that transforms the actions, i.e.,
    a list of integers, into a card matrix. Here we remove
    the six entries that are always zero and flatten
    the representation.
"""
if len(list_cards) == 0:
return np.zeros(54, dtype=np.int8)
matrix = np.zeros([4, 13], dtype=np.int8)
jokers = np.zeros(2, dtype=np.int8)
counter = Counter(list_cards)
for card, num_times in counter.items():
if card < 20:
matrix[:, Card2Column[card]] = NumOnes2Array[num_times]
elif card == 20:
jokers[0] = 1
elif card == 30:
jokers[1] = 1
return np.concatenate((matrix.flatten('F'), jokers))
<|reserved_special_token_0|>
def _process_action_seq(sequence, length=15, new_model=True):
"""
    A utility function encoding historical moves. We
    encode the last `length` moves (15 by default). If there are
    fewer moves, we pad with empty moves (encoded as zeros).
"""
sequence = sequence[-length:].copy()
if new_model:
sequence = sequence[::-1]
if len(sequence) < length:
empty_sequence = [[] for _ in range(length - len(sequence))]
empty_sequence.extend(sequence)
sequence = empty_sequence
return sequence
def _get_one_hot_bomb(bomb_num):
"""
A utility function to encode the number of bombs
into one-hot representation.
"""
one_hot = np.zeros(15)
one_hot[bomb_num] = 1
return one_hot
def _get_obs_landlord(infoset):
"""
    Obtain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
last_action_batch, landlord_up_played_cards_batch,
landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards, last_action,
landlord_up_played_cards, landlord_down_played_cards,
landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
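# Rough shape check for the landlord features built above (assuming every card
# vector from _cards2array is 54-dim): x_batch stacks 6 * 54 card features plus
# 17 + 17 + 15 one-hot features = 373 columns per legal action, which should
# match Table 4 of the paper referenced in the docstring.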
def _get_obs_landlord_up(infoset):
"""
    Obtain the landlord_up features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,
:], num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']
)
last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,
:], num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])
teammate_played_cards_batch = np.repeat(teammate_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
landlord_played_cards_batch, teammate_played_cards_batch,
last_action_batch, last_landlord_action_batch,
last_teammate_action_batch, landlord_num_cards_left_batch,
teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards,
landlord_played_cards, teammate_played_cards, last_action,
last_landlord_action, last_teammate_action, landlord_num_cards_left,
teammate_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
def _get_obs_landlord_down(infoset):
"""
    Obtain the landlord_down features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,
:], num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])
last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,
:], num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])
teammate_played_cards_batch = np.repeat(teammate_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
landlord_played_cards_batch, teammate_played_cards_batch,
last_action_batch, last_landlord_action_batch,
last_teammate_action_batch, landlord_num_cards_left_batch,
teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards,
landlord_played_cards, teammate_played_cards, last_action,
last_landlord_action, last_teammate_action, landlord_num_cards_left,
teammate_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.
float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':
infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8)}
return obs
def _get_obs_landlord_withbid(infoset):
"""
    Obtain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
last_action_batch, landlord_up_played_cards_batch,
landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards, last_action,
landlord_up_played_cards, landlord_down_played_cards,
landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
def _get_obs_general1(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],
'landlord_down': [0, 0, 1]}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,
axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
other_handcards_left_list = []
    for pos in ['landlord', 'landlord_up', 'landlord_down']:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((position_info_batch, my_handcards_batch,
other_handcards_batch, three_landlord_cards_batch,
last_action_batch, landlord_played_cards_batch,
landlord_up_played_cards_batch, landlord_down_played_cards_batch,
landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,
multiply_info_batch, my_action_batch))
x_no_action = np.hstack((position_info, my_handcards, other_handcards,
three_landlord_cards, last_action, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
landlord_num_cards_left, landlord_up_num_cards_left,
landlord_down_num_cards_left, bomb_num, bid_info, multiply_info))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 32))
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': position, 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
def _get_obs_general(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],
'landlord_down': [0, 0, 1]}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,
axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
other_handcards_left_list = []
    for pos in ['landlord', 'landlord_up', 'landlord_down']:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
num_cards_left = np.hstack((landlord_num_cards_left,
landlord_up_num_cards_left, landlord_down_num_cards_left))
x_batch = np.hstack((bid_info_batch, multiply_info_batch))
x_no_action = np.hstack((bid_info, multiply_info))
z = np.vstack((num_cards_left, my_handcards, other_handcards,
three_landlord_cards, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
_action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 32))))
_z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
my_action_batch = my_action_batch[:, np.newaxis, :]
z_batch = np.zeros([len(_z_batch), 40, 54], int)
for i in range(0, len(_z_batch)):
z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))
obs = {'position': position, 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
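# Layout note for the general observation above, assuming _action_seq_list2array
# (not shown in this file) returns a (32, 54) matrix for the 32-move history:
# each per-action z tensor has 40 rows of width 54, namely one action row
# stacked on top of 7 state rows (num_cards_left, my/other hand cards, the
# three landlord cards and the three played-cards vectors) plus the 32 history
# rows, matching the [num_legal_actions, 40, 54] allocation of z_batch.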
def gen_bid_legal_actions(player_id, bid_info):
self_bid_info = bid_info[:, [(player_id - 1) % 3, player_id, (player_id +
1) % 3]]
curr_round = -1
for r in range(4):
if -1 in self_bid_info[r]:
curr_round = r
break
bid_actions = []
if curr_round != -1:
self_bid_info[curr_round] = [0, 0, 0]
bid_actions.append(np.array(self_bid_info).flatten())
self_bid_info[curr_round] = [0, 1, 0]
bid_actions.append(np.array(self_bid_info).flatten())
return np.array(bid_actions)
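# Example: while a round is still open, gen_bid_legal_actions returns two
# flattened 4x3 matrices (columns reordered relative to the current player):
# one with the open round set to [0, 0, 0] (pass) and one with [0, 1, 0] (bid).
# If no round still contains -1, an empty array is returned.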
def _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_info = np.array([0, 0, 0])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = bid_legal_actions
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array([])
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(2):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((position_info_batch, my_handcards_batch,
other_handcards_batch, three_landlord_cards_batch,
last_action_batch, landlord_played_cards_batch,
landlord_up_played_cards_batch, landlord_down_played_cards_batch,
landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,
multiply_info_batch, my_action_batch))
x_no_action = np.hstack((position_info, my_handcards, other_handcards,
three_landlord_cards, last_action, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
landlord_num_cards_left, landlord_up_num_cards_left,
landlord_down_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':
z_batch.astype(np.float32), 'legal_actions': bid_legal_actions,
'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),
'bid_info_batch': bid_info_batch.astype(np.int8), 'multiply_info':
multiply_info.astype(np.int8)}
return obs
def _get_obs_for_bid(player_id, bid_info, hand_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])
x_batch = np.hstack((my_handcards_batch, bid_info_batch))
x_no_action = np.hstack(my_handcards)
obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':
np.array([0, 0]), 'legal_actions': bid_legal_actions, 'x_no_action':
x_no_action.astype(np.int8), 'bid_info_batch': bid_info_batch.
astype(np.int8)}
return obs
def _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 3
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],
'landlord_down': [0, 0, 1]}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,
axis=0)
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
three_landlord_cards = _cards2array(landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(num_legal_actions):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((position_info_batch, my_handcards_batch,
other_handcards_batch, three_landlord_cards_batch,
last_action_batch, landlord_played_cards_batch,
landlord_up_played_cards_batch, landlord_down_played_cards_batch,
landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,
multiply_info_batch, my_action_batch))
x_no_action = np.hstack((position_info, my_handcards, other_handcards,
three_landlord_cards, last_action, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
landlord_num_cards_left, landlord_up_num_cards_left,
landlord_down_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':
z_batch.astype(np.float32), 'legal_actions': multiply_info_batch,
'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),
'bid_info': bid_info.astype(np.int8), 'multiply_info_batch':
multiply_info.astype(np.int8)}
return obs
<|reserved_special_token_1|>
from collections import Counter
import numpy as np
import random
import torch
import BidModel
from douzero.env.game import GameEnv
env_version = '3.2'
env_url = 'http://od.vcccz.com/hechuan/env.py'
Card2Column = {3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7,
               11: 8, 12: 9, 13: 10, 14: 11, 17: 12}
NumOnes2Array = {0: np.array([0, 0, 0, 0]), 1: np.array([1, 0, 0, 0]),
                 2: np.array([1, 1, 0, 0]), 3: np.array([1, 1, 1, 0]),
                 4: np.array([1, 1, 1, 1])}
deck = []
for i in range(3, 15):
deck.extend([i for _ in range(4)])
deck.extend([17 for _ in range(4)])
deck.extend([20, 30])
class Env:
"""
Doudizhu multi-agent wrapper
"""
def __init__(self, objective):
"""
Objective is wp/adp/logadp. It indicates whether bombs are
considered in the reward calculation. Here, we use dummy agents.
This is because, in the original game, the players
are `in` the game. Here, we want to isolate
players and environments to have a more gym style
interface. To achieve this, we use dummy players
to play. For each move, we tell the corresponding
dummy player which action to play, then the player
will perform the actual action in the game engine.
"""
self.objective = objective
self.players = {}
for position in ['landlord', 'landlord_up', 'landlord_down']:
self.players[position] = DummyAgent(position)
self._env = GameEnv(self.players)
self.total_round = 0
self.force_bid = 0
self.infoset = None
def reset(self, model, device, flags=None):
"""
Every time reset is called, the environment
will be re-initialized with a new deck of cards.
This function is usually called when a game is over.
"""
self._env.reset()
if model is None:
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[
20:37], 'landlord_down': _deck[37:54],
'three_landlord_cards': _deck[17:20]}
for key in card_play_data:
card_play_data[key].sort()
self._env.card_play_init(card_play_data)
self.infoset = self._game_infoset
return get_obs(self.infoset)
else:
self.total_round += 1
bid_done = False
card_play_data = []
landlord_cards = []
last_bid = 0
bid_count = 0
player_ids = {}
bid_info = None
bid_obs_buffer = []
multiply_obs_buffer = []
bid_limit = 3
force_bid = False
while not bid_done:
bid_limit -= 1
bid_obs_buffer.clear()
multiply_obs_buffer.clear()
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]
for i in range(3):
card_play_data[i].sort()
landlord_cards = _deck[51:54]
landlord_cards.sort()
bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -
1], [-1, -1, -1]])
bidding_player = random.randint(0, 2)
first_bid = -1
last_bid = -1
bid_count = 0
if bid_limit <= 0:
force_bid = True
for r in range(3):
bidding_obs = _get_obs_for_bid(bidding_player, bid_info,
card_play_data[bidding_player])
with torch.no_grad():
action = model.forward('bidding', torch.tensor(
bidding_obs['z_batch'], device=device), torch.
tensor(bidding_obs['x_batch'], device=device),
flags=flags)
if bid_limit <= 0:
wr = BidModel.predict_env(card_play_data[
bidding_player])
if wr >= 0.7:
action = {'action': 1}
bid_limit += 1
bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'
][action['action']], 'z_batch': bidding_obs[
'z_batch'][action['action']], 'pid': bidding_player})
if action['action'] == 1:
last_bid = bidding_player
bid_count += 1
if first_bid == -1:
first_bid = bidding_player
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
else:
bid_info[r] = [0, 0, 0]
bidding_player = (bidding_player + 1) % 3
one_count = np.count_nonzero(bid_info == 1)
if one_count == 0:
continue
elif one_count > 1:
r = 3
bidding_player = first_bid
bidding_obs = _get_obs_for_bid(bidding_player, bid_info,
card_play_data[bidding_player])
with torch.no_grad():
action = model.forward('bidding', torch.tensor(
bidding_obs['z_batch'], device=device), torch.
tensor(bidding_obs['x_batch'], device=device),
flags=flags)
bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'
][action['action']], 'z_batch': bidding_obs[
'z_batch'][action['action']], 'pid': bidding_player})
if action['action'] == 1:
last_bid = bidding_player
bid_count += 1
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
break
card_play_data[last_bid].extend(landlord_cards)
card_play_data = {'landlord': card_play_data[last_bid],
'landlord_up': card_play_data[(last_bid - 1) % 3],
'landlord_down': card_play_data[(last_bid + 1) % 3],
'three_landlord_cards': landlord_cards}
card_play_data['landlord'].sort()
player_ids = {'landlord': last_bid, 'landlord_up': (last_bid -
1) % 3, 'landlord_down': (last_bid + 1) % 3}
player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):
'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}
for bid_obs in bid_obs_buffer:
bid_obs.update({'position': player_positions[bid_obs['pid']]})
self._env.card_play_init(card_play_data)
multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.
array([0, 0, 1])]
for pos in ['landlord', 'landlord_up', 'landlord_down']:
pid = player_ids[pos]
self._env.info_sets[pos].player_id = pid
self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %
3, pid, (pid + 1) % 3]]
self._env.bid_count = bid_count
action = {'action': 0}
self._env.info_sets[pos].multiply_info = multiply_map[action
['action']]
self._env.multiply_count[pos] = action['action']
self.infoset = self._game_infoset
if force_bid:
self.force_bid += 1
if self.total_round % 100 == 0:
print('Forced-bid deals: %i/%i %.1f%%' % (self.force_bid, self.
total_round, self.force_bid / self.total_round * 100))
self.force_bid = 0
self.total_round = 0
return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,
'multiply_obs_buffer': multiply_obs_buffer}
def step(self, action):
"""
Step function takes as input the action, which
is a list of integers, and outputs the next observation,
reward, and a Boolean variable indicating whether the
current game is finished. It also returns an empty
dictionary that is reserved to pass useful information.
"""
assert action in self.infoset.legal_actions
self.players[self._acting_player_position].set_action(action)
self._env.step()
self.infoset = self._game_infoset
done = False
reward = 0.0
if self._game_over:
done = True
reward = {'play': {'landlord': self._get_reward('landlord'),
'landlord_up': self._get_reward('landlord_up'),
'landlord_down': self._get_reward('landlord_down')}, 'bid':
{'landlord': self._get_reward_bidding('landlord') * 2,
'landlord_up': self._get_reward_bidding('landlord_up'),
'landlord_down': self._get_reward_bidding('landlord_down')}}
obs = None
else:
obs = get_obs(self.infoset)
return obs, reward, done, {}
def _get_reward(self, pos):
"""
This function is called at the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
self_bomb_num = self._env.pos_bomb_num[pos]
if winner == 'landlord':
if self.objective == 'adp':
return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num
+ self._env.multiply_count[pos]) / 8
elif self.objective == 'logadp':
return (1.0 - self._env.step_count * 0.0033
) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[
pos] / 4
else:
return 1.0 - self._env.step_count * 0.0033
elif self.objective == 'adp':
return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +
self._env.multiply_count[pos]) / 8
elif self.objective == 'logadp':
return (-1.0 + self._env.step_count * 0.0033
) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos
] / 4
else:
return -1.0 + self._env.step_count * 0.0033
def _get_reward_bidding(self, pos):
"""
This function is called at the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
if winner == 'landlord':
return 1.0 * 2 ** (self._env.bid_count - 1) / 8
else:
return -1.0 * 2 ** (self._env.bid_count - 1) / 8
@property
def _game_infoset(self):
"""
Here, infoset is defined as all the information
in the current situation, including the hand cards
of all the players, all the historical moves, etc.
That is, it contains perfect information. Later,
we will use functions to extract the observable
information from the views of the three players.
"""
return self._env.game_infoset
@property
def _game_bomb_num(self):
"""
The number of bombs played so far. This is used as
a feature of the neural network and is also used to
calculate ADP.
"""
return self._env.get_bomb_num()
@property
def _game_winner(self):
""" A string of landlord/peasants
"""
return self._env.get_winner()
@property
def _acting_player_position(self):
"""
The player that is active. It can be landlord,
landlord_down, or landlord_up.
"""
return self._env.acting_player_position
@property
def _game_over(self):
""" Returns a Boolean
"""
return self._env.game_over
class DummyAgent(object):
"""
Dummy agent is designed to easily interact with the
game engine. The agent will first be told what action
to perform. Then the environment will call this agent
to perform the actual action. This can help us to
isolate environment and agents towards a gym like
interface.
"""
def __init__(self, position):
self.position = position
self.action = None
def act(self, infoset):
"""
Simply return the action that is set previously.
"""
assert self.action in infoset.legal_actions
return self.action
def set_action(self, action):
"""
The environment uses this function to tell
the dummy agent what to do.
"""
self.action = action
def get_obs(infoset, use_general=True):
"""
This function obtains observations with imperfect information
from the infoset. It has three branches since we encode
different features for different positions.
This function will return dictionary named `obs`. It contains
several fields. These fields will be used to train the model.
One can play with those features to improve the performance.
`position` is a string that can be landlord/landlord_down/landlord_up
`x_batch` is a batch of features (excluding the historical moves).
It also encodes the action feature.
`z_batch` is a batch of features with historical moves only.
`legal_actions` is the legal moves.
`x_no_action`: the features (excluding the historical moves and
the action features). It does not have the batch dim.
`z`: same as z_batch but not a batch.
"""
if use_general:
if infoset.player_position not in ['landlord', 'landlord_up',
'landlord_down']:
raise ValueError('')
return _get_obs_general(infoset, infoset.player_position)
elif infoset.player_position == 'landlord':
return _get_obs_landlord(infoset)
elif infoset.player_position == 'landlord_up':
return _get_obs_landlord_up(infoset)
elif infoset.player_position == 'landlord_down':
return _get_obs_landlord_down(infoset)
else:
raise ValueError('')
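# Usage sketch (illustrative, not part of the original API surface): every row
# of obs['x_batch'] / obs['z_batch'] corresponds to one entry of
# obs['legal_actions'], so an agent can score all legal moves in a single
# batched forward pass and index back into obs['legal_actions'] with the
# argmax. obs['x_no_action'] and obs['z'] carry the same state features
# without the per-action batch dimension, e.g. for storing transitions.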
def _get_one_hot_array(num_left_cards, max_num_cards):
"""
A utility function to obtain one-hot encoding
"""
one_hot = np.zeros(max_num_cards)
if num_left_cards > 0:
one_hot[num_left_cards - 1] = 1
return one_hot
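# Illustrative example: _get_one_hot_array(3, 17) returns a length-17 vector
# with a 1 at index 2 ("3 cards left"); _get_one_hot_array(0, 17) is all
# zeros, which is how an empty hand is encoded.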
def _cards2array(list_cards):
"""
A utility function that transforms the actions, i.e.,
a list of integers, into a card matrix. Here we remove
the six entries that are always zero and flatten
the representations.
"""
if len(list_cards) == 0:
return np.zeros(54, dtype=np.int8)
matrix = np.zeros([4, 13], dtype=np.int8)
jokers = np.zeros(2, dtype=np.int8)
counter = Counter(list_cards)
for card, num_times in counter.items():
if card < 20:
matrix[:, Card2Column[card]] = NumOnes2Array[num_times]
elif card == 20:
jokers[0] = 1
elif card == 30:
jokers[1] = 1
return np.concatenate((matrix.flatten('F'), jokers))
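# Illustrative example: _cards2array([3, 3, 20]) returns a length-54 vector
# whose first four entries are [1, 1, 0, 0] (two 3s; the 4x13 matrix is
# flattened column-major) and whose entry at index 52 is 1 (card id 20, the
# first joker slot); card id 30 maps to index 53.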
def _action_seq_list2array(action_seq_list, new_model=True):
"""
A utility function to encode the historical moves.
We encode the historical 15 actions. If there are
fewer than 15 actions, we pad the features with 0. Since
three moves is a round in DouDizhu, we concatenate
the representations for each consecutive three moves.
Finally, we obtain a 5x162 matrix, which will be fed
into LSTM for encoding.
"""
if new_model:
position_map = {'landlord': 0, 'landlord_up': 1, 'landlord_down': 2}
action_seq_array = np.ones((len(action_seq_list), 54)) * -1
for row, list_cards in enumerate(action_seq_list):
if list_cards != []:
action_seq_array[row, :54] = _cards2array(list_cards[1])
else:
action_seq_array = np.zeros((len(action_seq_list), 54))
for row, list_cards in enumerate(action_seq_list):
if list_cards != []:
action_seq_array[row, :] = _cards2array(list_cards[1])
action_seq_array = action_seq_array.reshape(5, 162)
return action_seq_array
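# Shape note (derived from the code above): with new_model=True the result is
# a (len(action_seq_list), 54) matrix, one row per historical move, with
# padded (empty) rows left at -1; with new_model=False the 15 per-move rows
# are reshaped into a 5x162 matrix (three consecutive moves per row) for the
# LSTM encoder.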
def _process_action_seq(sequence, length=15, new_model=True):
"""
A utility function encoding historical moves. We
encode 15 moves. If there are fewer than 15 moves,
we pad with empty moves.
"""
sequence = sequence[-length:].copy()
if new_model:
sequence = sequence[::-1]
if len(sequence) < length:
empty_sequence = [[] for _ in range(length - len(sequence))]
empty_sequence.extend(sequence)
sequence = empty_sequence
return sequence
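# Illustrative example: _process_action_seq(seq, length=4, new_model=False)
# with seq = [a, b] returns [[], [], a, b] -- the last `length` moves,
# front-padded with empty moves. With new_model=True the kept moves are
# reversed (most recent first) before padding.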
def _get_one_hot_bomb(bomb_num):
"""
A utility function to encode the number of bombs
into one-hot representation.
"""
one_hot = np.zeros(15)
one_hot[bomb_num] = 1
return one_hot
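# Illustrative example: _get_one_hot_bomb(2) returns a length-15 vector with a
# 1 at index 2; index 0 means no bombs have been played yet.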
def _get_obs_landlord(infoset):
"""
Obtain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
last_action_batch, landlord_up_played_cards_batch,
landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards, last_action,
landlord_up_played_cards, landlord_down_played_cards,
landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
def _get_obs_landlord_up(infoset):
"""
Obtain the landlord_up features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,
:], num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']
)
last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,
:], num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])
teammate_played_cards_batch = np.repeat(teammate_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
landlord_played_cards_batch, teammate_played_cards_batch,
last_action_batch, last_landlord_action_batch,
last_teammate_action_batch, landlord_num_cards_left_batch,
teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards,
landlord_played_cards, teammate_played_cards, last_action,
last_landlord_action, last_teammate_action, landlord_num_cards_left,
teammate_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
def _get_obs_landlord_down(infoset):
"""
Obtain the landlord_down features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,
:], num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])
last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,
:], num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])
teammate_played_cards_batch = np.repeat(teammate_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
landlord_played_cards_batch, teammate_played_cards_batch,
last_action_batch, last_landlord_action_batch,
last_teammate_action_batch, landlord_num_cards_left_batch,
teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards,
landlord_played_cards, teammate_played_cards, last_action,
last_landlord_action, last_teammate_action, landlord_num_cards_left,
teammate_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.
float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':
infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8)}
return obs
def _get_obs_landlord_withbid(infoset):
"""
Obtain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((my_handcards_batch, other_handcards_batch,
last_action_batch, landlord_up_played_cards_batch,
landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))
x_no_action = np.hstack((my_handcards, other_handcards, last_action,
landlord_up_played_cards, landlord_down_played_cards,
landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 15, False), False)
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
def _get_obs_general1(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],
'landlord_down': [0, 0, 1]}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,
axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
other_handcards_left_list = []
for pos in ['landlord', 'landlord_up', 'landlord_down']:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((position_info_batch, my_handcards_batch,
other_handcards_batch, three_landlord_cards_batch,
last_action_batch, landlord_played_cards_batch,
landlord_up_played_cards_batch, landlord_down_played_cards_batch,
landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,
multiply_info_batch, my_action_batch))
x_no_action = np.hstack((position_info, my_handcards, other_handcards,
three_landlord_cards, last_action, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
landlord_num_cards_left, landlord_up_num_cards_left,
landlord_down_num_cards_left, bomb_num, bid_info, multiply_info))
z = _action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 32))
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': position, 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
def _get_obs_general(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],
'landlord_down': [0, 0, 1]}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,
axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(infoset.
num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
other_handcards_left_list = []
for pos in ['landlord', 'landlord_up', 'landlord_down']:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']
)
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(infoset.played_cards[
'landlord_down'])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(infoset.bomb_num)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
num_cards_left = np.hstack((landlord_num_cards_left,
landlord_up_num_cards_left, landlord_down_num_cards_left))
x_batch = np.hstack((bid_info_batch, multiply_info_batch))
x_no_action = np.hstack((bid_info, multiply_info))
z = np.vstack((num_cards_left, my_handcards, other_handcards,
three_landlord_cards, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
_action_seq_list2array(_process_action_seq(infoset.
card_play_action_seq, 32))))
_z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
my_action_batch = my_action_batch[:, np.newaxis, :]
z_batch = np.zeros([len(_z_batch), 40, 54], int)
for i in range(0, len(_z_batch)):
z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))
obs = {'position': position, 'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.
legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.
astype(np.int8)}
return obs
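# Shape note for the general (new-model) observation above: z stacks 7 state
# rows (the concatenated cards-left one-hots, 20 + 17 + 17 = 54, own hand,
# others' hands, the three landlord cards, and the three played-cards
# vectors) on top of 32 historical-move rows, giving z of shape (39, 54);
# z_batch prepends the per-action row, giving (num_legal_actions, 40, 54),
# while x_batch only carries the bid and multiply info,
# shape (num_legal_actions, 15).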
def gen_bid_legal_actions(player_id, bid_info):
self_bid_info = bid_info[:, [(player_id - 1) % 3, player_id, (player_id +
1) % 3]]
curr_round = -1
for r in range(4):
if -1 in self_bid_info[r]:
curr_round = r
break
bid_actions = []
if curr_round != -1:
self_bid_info[curr_round] = [0, 0, 0]
bid_actions.append(np.array(self_bid_info).flatten())
self_bid_info[curr_round] = [0, 1, 0]
bid_actions.append(np.array(self_bid_info).flatten())
return np.array(bid_actions)
def _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_info = np.array([0, 0, 0])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = bid_legal_actions
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array([])
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(2):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((position_info_batch, my_handcards_batch,
other_handcards_batch, three_landlord_cards_batch,
last_action_batch, landlord_played_cards_batch,
landlord_up_played_cards_batch, landlord_down_played_cards_batch,
landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,
multiply_info_batch, my_action_batch))
x_no_action = np.hstack((position_info, my_handcards, other_handcards,
three_landlord_cards, last_action, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
landlord_num_cards_left, landlord_up_num_cards_left,
landlord_down_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':
z_batch.astype(np.float32), 'legal_actions': bid_legal_actions,
'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),
'bid_info_batch': bid_info_batch.astype(np.int8), 'multiply_info':
multiply_info.astype(np.int8)}
return obs
def _get_obs_for_bid(player_id, bid_info, hand_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])
x_batch = np.hstack((my_handcards_batch, bid_info_batch))
x_no_action = np.hstack(my_handcards)
obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':
np.array([0, 0]), 'legal_actions': bid_legal_actions, 'x_no_action':
x_no_action.astype(np.int8), 'bid_info_batch': bid_info_batch.
astype(np.int8)}
return obs
def _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 3
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],
'landlord_down': [0, 0, 1]}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,
axis=0)
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
three_landlord_cards = _cards2array(landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,
:], num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(num_legal_actions):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left
[np.newaxis, :], num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(landlord_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.
newaxis, :], num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards
[np.newaxis, :], num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,
axis=0)
x_batch = np.hstack((position_info_batch, my_handcards_batch,
other_handcards_batch, three_landlord_cards_batch,
last_action_batch, landlord_played_cards_batch,
landlord_up_played_cards_batch, landlord_down_played_cards_batch,
landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,
multiply_info_batch, my_action_batch))
x_no_action = np.hstack((position_info, my_handcards, other_handcards,
three_landlord_cards, last_action, landlord_played_cards,
landlord_up_played_cards, landlord_down_played_cards,
landlord_num_cards_left, landlord_up_num_cards_left,
landlord_down_num_cards_left, bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':
z_batch.astype(np.float32), 'legal_actions': multiply_info_batch,
'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),
'bid_info': bid_info.astype(np.int8), 'multiply_info_batch':
multiply_info.astype(np.int8)}
return obs
<|reserved_special_token_1|>
from collections import Counter
import numpy as np
import random
import torch
import BidModel
from douzero.env.game import GameEnv
env_version = "3.2"
env_url = "http://od.vcccz.com/hechuan/env.py"
Card2Column = {3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7,
11: 8, 12: 9, 13: 10, 14: 11, 17: 12}
NumOnes2Array = {0: np.array([0, 0, 0, 0]),
1: np.array([1, 0, 0, 0]),
2: np.array([1, 1, 0, 0]),
3: np.array([1, 1, 1, 0]),
4: np.array([1, 1, 1, 1])}
deck = []
for i in range(3, 15):
deck.extend([i for _ in range(4)])
deck.extend([17 for _ in range(4)])
deck.extend([20, 30])
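# Sanity note: the deck built above contains 4 copies of ranks 3..14
# (48 cards), 4 copies of rank 17 (the 2s), and the two jokers (20 and 30),
# for the standard 54-card DouDizhu deck.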
class Env:
"""
Doudizhu multi-agent wrapper
"""
def __init__(self, objective):
"""
Objective is wp/adp/logadp. It indicates whether bombs are
considered in the reward calculation. Here, we use dummy agents.
This is because, in the original game, the players
are `in` the game. Here, we want to isolate
players and environments to have a more gym style
interface. To achieve this, we use dummy players
to play. For each move, we tell the corresponding
dummy player which action to play, then the player
will perform the actual action in the game engine.
"""
self.objective = objective
# Initialize players
# We use one dummy player for each of the three positions
self.players = {}
for position in ['landlord', 'landlord_up', 'landlord_down']:
self.players[position] = DummyAgent(position)
# Initialize the internal environment
self._env = GameEnv(self.players)
self.total_round = 0
self.force_bid = 0
self.infoset = None
def reset(self, model, device, flags=None):
"""
Every time reset is called, the environment
will be re-initialized with a new deck of cards.
This function is usually called when a game is over.
"""
self._env.reset()
# Randomly shuffle the deck
if model is None:
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = {'landlord': _deck[:20],
'landlord_up': _deck[20:37],
'landlord_down': _deck[37:54],
'three_landlord_cards': _deck[17:20],
}
for key in card_play_data:
card_play_data[key].sort()
self._env.card_play_init(card_play_data)
self.infoset = self._game_infoset
return get_obs(self.infoset)
else:
self.total_round += 1
bid_done = False
card_play_data = []
landlord_cards = []
last_bid = 0
bid_count = 0
player_ids = {}
bid_info = None
bid_obs_buffer = []
multiply_obs_buffer = []
bid_limit = 3
force_bid = False
while not bid_done:
bid_limit -= 1
bid_obs_buffer.clear()
multiply_obs_buffer.clear()
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = [
_deck[:17],
_deck[17:34],
_deck[34:51],
]
for i in range(3):
card_play_data[i].sort()
landlord_cards = _deck[51:54]
landlord_cards.sort()
bid_info = np.array([[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]])
bidding_player = random.randint(0, 2)
# bidding_player = 0 # debug
first_bid = -1
last_bid = -1
bid_count = 0
if bid_limit <= 0:
force_bid = True
for r in range(3):
bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])
with torch.no_grad():
action = model.forward("bidding", torch.tensor(bidding_obs["z_batch"], device=device),
torch.tensor(bidding_obs["x_batch"], device=device), flags=flags)
if bid_limit <= 0:
wr = BidModel.predict_env(card_play_data[bidding_player])
if wr >= 0.7:
action = {"action": 1} # debug
bid_limit += 1
bid_obs_buffer.append({
"x_batch": bidding_obs["x_batch"][action["action"]],
"z_batch": bidding_obs["z_batch"][action["action"]],
"pid": bidding_player
})
if action["action"] == 1:
last_bid = bidding_player
bid_count += 1
if first_bid == -1:
first_bid = bidding_player
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
else:
bid_info[r] = [0, 0, 0]
bidding_player = (bidding_player + 1) % 3
one_count = np.count_nonzero(bid_info == 1)
if one_count == 0:
continue
elif one_count > 1:
r = 3
bidding_player = first_bid
bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])
with torch.no_grad():
action = model.forward("bidding", torch.tensor(bidding_obs["z_batch"], device=device),
torch.tensor(bidding_obs["x_batch"], device=device), flags=flags)
bid_obs_buffer.append({
"x_batch": bidding_obs["x_batch"][action["action"]],
"z_batch": bidding_obs["z_batch"][action["action"]],
"pid": bidding_player
})
if action["action"] == 1:
last_bid = bidding_player
bid_count += 1
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
break
card_play_data[last_bid].extend(landlord_cards)
card_play_data = {'landlord': card_play_data[last_bid],
'landlord_up': card_play_data[(last_bid - 1) % 3],
'landlord_down': card_play_data[(last_bid + 1) % 3],
'three_landlord_cards': landlord_cards,
}
card_play_data["landlord"].sort()
player_ids = {
'landlord': last_bid,
'landlord_up': (last_bid - 1) % 3,
'landlord_down': (last_bid + 1) % 3,
}
player_positions = {
last_bid: 'landlord',
(last_bid - 1) % 3: 'landlord_up',
(last_bid + 1) % 3: 'landlord_down'
}
for bid_obs in bid_obs_buffer:
bid_obs.update({"position": player_positions[bid_obs["pid"]]})
# Initialize the cards
self._env.card_play_init(card_play_data)
multiply_map = [
np.array([1, 0, 0]),
np.array([0, 1, 0]),
np.array([0, 0, 1])
]
for pos in ["landlord", "landlord_up", "landlord_down"]:
pid = player_ids[pos]
self._env.info_sets[pos].player_id = pid
self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) % 3, pid, (pid + 1) % 3]]
self._env.bid_count = bid_count
# multiply_obs = _get_obs_for_multiply(pos, self._env.info_sets[pos].bid_info, card_play_data[pos],
# landlord_cards)
# action = model.forward(pos, torch.tensor(multiply_obs["z_batch"], device=device),
# torch.tensor(multiply_obs["x_batch"], device=device), flags=flags)
# multiply_obs_buffer.append({
# "x_batch": multiply_obs["x_batch"][action["action"]],
# "z_batch": multiply_obs["z_batch"][action["action"]],
# "position": pos
# })
action = {"action": 0}
self._env.info_sets[pos].multiply_info = multiply_map[action["action"]]
self._env.multiply_count[pos] = action["action"]
self.infoset = self._game_infoset
if force_bid:
self.force_bid += 1
if self.total_round % 100 == 0:
print("发牌情况: %i/%i %.1f%%" % (self.force_bid, self.total_round, self.force_bid / self.total_round * 100))
self.force_bid = 0
self.total_round = 0
return get_obs(self.infoset), {
"bid_obs_buffer": bid_obs_buffer,
"multiply_obs_buffer": multiply_obs_buffer
}
def step(self, action):
"""
Step function takes as input the action, which
is a list of integers, and outputs the next observation,
reward, and a Boolean variable indicating whether the
current game is finished. It also returns an empty
dictionary that is reserved to pass useful information.
"""
assert action in self.infoset.legal_actions
self.players[self._acting_player_position].set_action(action)
self._env.step()
self.infoset = self._game_infoset
done = False
reward = 0.0
if self._game_over:
done = True
reward = {
"play": {
"landlord": self._get_reward("landlord"),
"landlord_up": self._get_reward("landlord_up"),
"landlord_down": self._get_reward("landlord_down")
},
"bid": {
"landlord": self._get_reward_bidding("landlord")*2,
"landlord_up": self._get_reward_bidding("landlord_up"),
"landlord_down": self._get_reward_bidding("landlord_down")
}
}
obs = None
else:
obs = get_obs(self.infoset)
return obs, reward, done, {}
def _get_reward(self, pos):
"""
This function is called at the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
self_bomb_num = self._env.pos_bomb_num[pos]
if winner == 'landlord':
if self.objective == 'adp':
return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +self._env.multiply_count[pos]) /8
elif self.objective == 'logadp':
return (1.0 - self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4
else:
return 1.0 - self._env.step_count * 0.0033
else:
if self.objective == 'adp':
return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +self._env.multiply_count[pos]) /8
elif self.objective == 'logadp':
return (-1.0 + self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4
else:
return -1.0 + self._env.step_count * 0.0033
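# Worked example (adp objective, landlord wins): with step_count == 30,
# one bomb played and no extra multiplier, the reward is
# (1.1 - 30 * 0.0033) * 1.3 ** (1 + 0) / 8 = 1.001 * 1.3 / 8 ~= 0.163,
# so faster wins and more bombs/multipliers scale the reward up.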
def _get_reward_bidding(self, pos):
"""
This function is called at the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
if winner == 'landlord':
return 1.0 * 2**(self._env.bid_count-1) / 8
else:
return -1.0 * 2**(self._env.bid_count-1) / 8
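# Worked example: with two successful bids during the auction
# (self._env.bid_count == 2) this returns 2 ** (2 - 1) / 8 = 0.25 when the
# landlord wins and -0.25 otherwise, regardless of `pos`; step() then
# doubles the landlord's bidding reward.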
@property
def _game_infoset(self):
"""
Here, infoset is defined as all the information
in the current situation, including the hand cards
of all the players, all the historical moves, etc.
That is, it contains perfect information. Later,
we will use functions to extract the observable
information from the views of the three players.
"""
return self._env.game_infoset
@property
def _game_bomb_num(self):
"""
The number of bombs played so far. This is used as
a feature of the neural network and is also used to
calculate ADP.
"""
return self._env.get_bomb_num()
@property
def _game_winner(self):
""" A string of landlord/peasants
"""
return self._env.get_winner()
@property
def _acting_player_position(self):
"""
The player that is active. It can be landlord,
landlord_down, or landlord_up.
"""
return self._env.acting_player_position
@property
def _game_over(self):
""" Returns a Boolean
"""
return self._env.game_over
class DummyAgent(object):
"""
Dummy agent is designed to easily interact with the
game engine. The agent will first be told what action
to perform. Then the environment will call this agent
to perform the actual action. This can help us to
isolate environment and agents towards a gym like
interface.
"""
def __init__(self, position):
self.position = position
self.action = None
def act(self, infoset):
"""
Simply return the action that is set previously.
"""
assert self.action in infoset.legal_actions
return self.action
def set_action(self, action):
"""
The environment uses this function to tell
the dummy agent what to do.
"""
self.action = action
def get_obs(infoset, use_general=True):
"""
This function obtains observations with imperfect information
from the infoset. It has three branches since we encode
different features for different positions.
This function will return dictionary named `obs`. It contains
several fields. These fields will be used to train the model.
One can play with those features to improve the performance.
`position` is a string that can be landlord/landlord_down/landlord_up
`x_batch` is a batch of features (excluding the historical moves).
It also encodes the action feature.
`z_batch` is a batch of features with historical moves only.
`legal_actions` is the legal moves.
`x_no_action`: the features (excluding the historical moves and
the action features). It does not have the batch dim.
`z`: same as z_batch but not a batch.
"""
if use_general:
if infoset.player_position not in ["landlord", "landlord_up", "landlord_down"]:
raise ValueError('')
return _get_obs_general(infoset, infoset.player_position)
else:
if infoset.player_position == 'landlord':
return _get_obs_landlord(infoset)
elif infoset.player_position == 'landlord_up':
return _get_obs_landlord_up(infoset)
elif infoset.player_position == 'landlord_down':
return _get_obs_landlord_down(infoset)
else:
raise ValueError('')
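# Illustrative sketch (hypothetical usage, not part of the original module): a
# caller typically scores every legal action with a model and plays the argmax.
# `some_model` is a placeholder for any network mapping (z_batch, x_batch) to
# one value per legal action.
def _demo_pick_action(infoset, some_model):
    obs = get_obs(infoset)
    values = some_model(torch.from_numpy(obs['z_batch']),
                        torch.from_numpy(obs['x_batch']))
    best = int(torch.argmax(values.flatten()))
    return obs['legal_actions'][best]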
def _get_one_hot_array(num_left_cards, max_num_cards):
"""
A utility function to obtain one-hot endoding
"""
one_hot = np.zeros(max_num_cards)
if num_left_cards > 0:
one_hot[num_left_cards - 1] = 1
return one_hot
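# Illustrative sketch (not part of the original module): _get_one_hot_array
# marks the number of cards left with a single 1, e.g. 3 cards left out of a
# maximum of 17 sets index 2.
def _demo_one_hot_array():
    encoding = _get_one_hot_array(3, 17)
    assert encoding.shape == (17,) and encoding[2] == 1 and encoding.sum() == 1
    return encoding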
def _cards2array(list_cards):
"""
A utility function that transforms the actions, i.e.,
A list of integers into card matrix. Here we remove
the six entries that are always zero and flatten the
the representations.
"""
if len(list_cards) == 0:
return np.zeros(54, dtype=np.int8)
matrix = np.zeros([4, 13], dtype=np.int8)
jokers = np.zeros(2, dtype=np.int8)
counter = Counter(list_cards)
for card, num_times in counter.items():
if card < 20:
matrix[:, Card2Column[card]] = NumOnes2Array[num_times]
elif card == 20:
jokers[0] = 1
elif card == 30:
jokers[1] = 1
return np.concatenate((matrix.flatten('F'), jokers))
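# Illustrative sketch (not part of the original module): what _cards2array
# produces for a tiny hand, using the card-ID convention above (3-17 for the
# ranks, 20 and 30 for the two jokers).
def _demo_cards2array():
    encoding = _cards2array([3, 3, 4, 20])  # two 3s, one 4, and a joker
    assert encoding.shape == (54,)          # 4 x 13 rank grid + 2 joker bits
    assert encoding.sum() == 4              # one set bit per card in the hand
    return encoding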
# def _action_seq_list2array(action_seq_list):
# """
# A utility function to encode the historical moves.
# We encode the historical 15 actions. If there is
# no 15 actions, we pad the features with 0. Since
# three moves is a round in DouDizhu, we concatenate
# the representations for each consecutive three moves.
# Finally, we obtain a 5x162 matrix, which will be fed
# into LSTM for encoding.
# """
# action_seq_array = np.zeros((len(action_seq_list), 54))
# for row, list_cards in enumerate(action_seq_list):
# action_seq_array[row, :] = _cards2array(list_cards)
# # action_seq_array = action_seq_array.reshape(5, 162)
# return action_seq_array
def _action_seq_list2array(action_seq_list, new_model=True):
"""
A utility function to encode the historical moves.
We encode the historical 15 actions. If there is
no 15 actions, we pad the features with 0. Since
three moves is a round in DouDizhu, we concatenate
the representations for each consecutive three moves.
Finally, we obtain a 5x162 matrix, which will be fed
into LSTM for encoding.
"""
if new_model:
position_map = {"landlord": 0, "landlord_up": 1, "landlord_down": 2}
action_seq_array = np.ones((len(action_seq_list), 54)) * -1 # Default Value -1 for not using area
for row, list_cards in enumerate(action_seq_list):
if list_cards != []:
action_seq_array[row, :54] = _cards2array(list_cards[1])
else:
action_seq_array = np.zeros((len(action_seq_list), 54))
for row, list_cards in enumerate(action_seq_list):
if list_cards != []:
action_seq_array[row, :] = _cards2array(list_cards[1])
action_seq_array = action_seq_array.reshape(5, 162)
return action_seq_array
# action_seq_array = np.zeros((len(action_seq_list), 54))
# for row, list_cards in enumerate(action_seq_list):
# if list_cards != []:
# action_seq_array[row, :] = _cards2array(list_cards[1])
# return action_seq_array
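# Illustrative sketch (not part of the original module): output shapes of
# _action_seq_list2array for the two model variants. `_process_action_seq`
# (defined below) is only used here to build padded histories.
def _demo_action_seq_shapes():
    old = _action_seq_list2array(_process_action_seq([], 15, new_model=False),
                                 new_model=False)
    assert old.shape == (5, 162)   # 15 moves folded into 5 rounds of 3 moves
    new = _action_seq_list2array(_process_action_seq([], 32), new_model=True)
    assert new.shape == (32, 54)   # one 54-dim row per historical move
    return old.shape, new.shape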
def _process_action_seq(sequence, length=15, new_model=True):
"""
A utility function encoding historical moves. We
encode 15 moves. If there is no 15 moves, we pad
with zeros.
"""
sequence = sequence[-length:].copy()
if new_model:
sequence = sequence[::-1]
if len(sequence) < length:
empty_sequence = [[] for _ in range(length - len(sequence))]
empty_sequence.extend(sequence)
sequence = empty_sequence
return sequence
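# Illustrative sketch (not part of the original module): padding behaviour of
# _process_action_seq. With fewer moves than `length`, empty moves are
# prepended; with new_model=True the sequence is also reversed first.
def _demo_process_action_seq():
    seq = [[3], [4, 4], []]                     # three historical moves
    padded = _process_action_seq(seq, length=5, new_model=False)
    assert padded == [[], [], [3], [4, 4], []]  # left-padded to length 5
    newest_first = _process_action_seq(seq, length=5, new_model=True)
    assert newest_first == [[], [], [], [4, 4], [3]]
    return padded, newest_first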
def _get_one_hot_bomb(bomb_num):
"""
A utility function to encode the number of bombs
into one-hot representation.
"""
one_hot = np.zeros(15)
one_hot[bomb_num] = 1
return one_hot
def _get_obs_landlord(infoset):
"""
Obttain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
last_action_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
last_action,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_landlord_up(infoset):
"""
Obttain the landlord_up features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(
infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(
last_landlord_action[np.newaxis, :],
num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_teammate_action = _cards2array(
infoset.last_move_dict['landlord_down'])
last_teammate_action_batch = np.repeat(
last_teammate_action[np.newaxis, :],
num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
teammate_num_cards_left_batch = np.repeat(
teammate_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
teammate_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
teammate_played_cards_batch = np.repeat(
teammate_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
landlord_played_cards_batch,
teammate_played_cards_batch,
last_action_batch,
last_landlord_action_batch,
last_teammate_action_batch,
landlord_num_cards_left_batch,
teammate_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
landlord_played_cards,
teammate_played_cards,
last_action,
last_landlord_action,
last_teammate_action,
landlord_num_cards_left,
teammate_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord_up',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_landlord_down(infoset):
"""
Obttain the landlord_down features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(
infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(
last_landlord_action[np.newaxis, :],
num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_teammate_action = _cards2array(
infoset.last_move_dict['landlord_up'])
last_teammate_action_batch = np.repeat(
last_teammate_action[np.newaxis, :],
num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
teammate_num_cards_left_batch = np.repeat(
teammate_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
teammate_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
teammate_played_cards_batch = np.repeat(
teammate_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
landlord_played_cards_batch,
teammate_played_cards_batch,
last_action_batch,
last_landlord_action_batch,
last_teammate_action_batch,
landlord_num_cards_left_batch,
teammate_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
landlord_played_cards,
teammate_played_cards,
last_action,
last_landlord_action,
last_teammate_action,
landlord_num_cards_left,
teammate_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord_down',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_landlord_withbid(infoset):
"""
Obttain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
last_action_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
last_action,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_general1(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards_left_list = []
for pos in ["landlord", "landlord_up", "landlord_up"]:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch, # 3
my_handcards_batch, # 54
other_handcards_batch, # 54
three_landlord_cards_batch, # 54
last_action_batch, # 54
landlord_played_cards_batch, # 54
landlord_up_played_cards_batch, # 54
landlord_down_played_cards_batch, # 54
landlord_num_cards_left_batch, # 20
landlord_up_num_cards_left_batch, # 17
landlord_down_num_cards_left_batch, # 17
bomb_num_batch, # 15
bid_info_batch, # 12
multiply_info_batch, # 3
my_action_batch)) # 54
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num,
bid_info,
multiply_info))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': position,
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_general(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards_left_list = []
for pos in ["landlord", "landlord_up", "landlord_up"]:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
num_cards_left = np.hstack((
landlord_num_cards_left, # 20
landlord_up_num_cards_left, # 17
landlord_down_num_cards_left))
x_batch = np.hstack((
bid_info_batch, # 12
multiply_info_batch)) # 3
x_no_action = np.hstack((
bid_info,
multiply_info))
    z = np.vstack((
num_cards_left,
my_handcards, # 54
other_handcards, # 54
three_landlord_cards, # 54
landlord_played_cards, # 54
landlord_up_played_cards, # 54
landlord_down_played_cards, # 54
_action_seq_list2array(_process_action_seq(infoset.card_play_action_seq, 32))
))
_z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
    my_action_batch = my_action_batch[:, np.newaxis, :]
    z_batch = np.zeros([len(_z_batch), 40, 54], int)
    for i in range(len(_z_batch)):
        z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))
obs = {
'position': position,
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def gen_bid_legal_actions(player_id, bid_info):
self_bid_info = bid_info[:, [(player_id - 1) % 3, player_id, (player_id + 1) % 3]]
curr_round = -1
for r in range(4):
if -1 in self_bid_info[r]:
curr_round = r
break
bid_actions = []
if curr_round != -1:
self_bid_info[curr_round] = [0, 0, 0]
bid_actions.append(np.array(self_bid_info).flatten())
self_bid_info[curr_round] = [0, 1, 0]
bid_actions.append(np.array(self_bid_info).flatten())
return np.array(bid_actions)
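# Illustrative sketch (not part of the original module): the two legal bid
# actions (pass / bid) available to the first bidder of a fresh game.
def _demo_gen_bid_legal_actions():
    fresh_bid_info = np.array([[-1, -1, -1]] * 4)     # no bids made yet
    actions = gen_bid_legal_actions(0, fresh_bid_info)
    assert actions.shape == (2, 12)                   # flattened 4x3 bid grids
    assert actions[0][1] == 0 and actions[1][1] == 1  # column 1 is the bidder
    return actions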
def _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_info = np.array([0, 0, 0])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = bid_legal_actions
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array([])
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(2):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch,
my_handcards_batch,
other_handcards_batch,
three_landlord_cards_batch,
last_action_batch,
landlord_played_cards_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_num_cards_left_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
bid_info_batch,
multiply_info_batch,
my_action_batch))
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': bid_legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
"bid_info_batch": bid_info_batch.astype(np.int8),
"multiply_info": multiply_info.astype(np.int8)
}
return obs
def _get_obs_for_bid(player_id, bid_info, hand_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])
x_batch = np.hstack((my_handcards_batch,
bid_info_batch))
x_no_action = np.hstack((my_handcards))
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
'z_batch': np.array([0,0]),
'legal_actions': bid_legal_actions,
'x_no_action': x_no_action.astype(np.int8),
"bid_info_batch": bid_info_batch.astype(np.int8)
}
return obs
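# Note (added for clarity; shapes inferred from the code above): the bidding
# observation is deliberately small -- x_batch is (2, 114): 54 dims for the
# hand plus five repeated copies of the flattened 4x3 bid grid (5 * 12 = 60).
# z_batch is an unused placeholder (np.array([0, 0])).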
def _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 3
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
three_landlord_cards = _cards2array(landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(num_legal_actions):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch,
my_handcards_batch,
other_handcards_batch,
three_landlord_cards_batch,
last_action_batch,
landlord_played_cards_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_num_cards_left_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
bid_info_batch,
multiply_info_batch,
my_action_batch))
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': multiply_info_batch,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
"bid_info": bid_info.astype(np.int8),
"multiply_info_batch": multiply_info.astype(np.int8)
}
return obs
|
flexible
|
{
"blob_id": "4015078ee9640c4558a4f29ebbb89f9098a31014",
"index": 5720,
"step-1": "<mask token>\n\n\nclass Env:\n <mask token>\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[\n 20:37], 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20]}\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -\n 1], [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[\n bidding_player])\n if wr >= 0.7:\n action = {'action': 1}\n bid_limit += 1\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n 
flags=flags)\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards}\n card_play_data['landlord'].sort()\n player_ids = {'landlord': last_bid, 'landlord_up': (last_bid - \n 1) % 3, 'landlord_down': (last_bid + 1) % 3}\n player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):\n 'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}\n for bid_obs in bid_obs_buffer:\n bid_obs.update({'position': player_positions[bid_obs['pid']]})\n self._env.card_play_init(card_play_data)\n multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.\n array([0, 0, 1])]\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %\n 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n action = {'action': 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action\n ['action']]\n self._env.multiply_count[pos] = action['action']\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.\n total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,\n 'multiply_obs_buffer': multiply_obs_buffer}\n <mask token>\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n self_bomb_num = self._env.pos_bomb_num[pos]\n if winner == 'landlord':\n if self.objective == 'adp':\n return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num\n + self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (1.0 - self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[\n pos] / 4\n else:\n return 1.0 - self._env.step_count * 0.0033\n elif self.objective == 'adp':\n return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +\n self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (-1.0 + self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos\n ] / 4\n else:\n return -1.0 + self._env.step_count * 0.0033\n\n def _get_reward_bidding(self, pos):\n \"\"\"\n This function is called in the end of each\n game. 
It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2 ** (self._env.bid_count - 1) / 8\n else:\n return -1.0 * 2 ** (self._env.bid_count - 1) / 8\n\n @property\n def _game_infoset(self):\n \"\"\"\n Here, inforset is defined as all the information\n in the current situation, incuding the hand cards\n of all the players, all the historical moves, etc.\n That is, it contains perferfect infomation. Later,\n we will use functions to extract the observable\n information from the views of the three players.\n \"\"\"\n return self._env.game_infoset\n\n @property\n def _game_bomb_num(self):\n \"\"\"\n The number of bombs played so far. This is used as\n a feature of the neural network and is also used to\n calculate ADP.\n \"\"\"\n return self._env.get_bomb_num()\n\n @property\n def _game_winner(self):\n \"\"\" A string of landlord/peasants\n \"\"\"\n return self._env.get_winner()\n <mask token>\n <mask token>\n\n\nclass DummyAgent(object):\n \"\"\"\n Dummy agent is designed to easily interact with the\n game engine. The agent will first be told what action\n to perform. Then the environment will call this agent\n to perform the actual action. This can help us to\n isolate environment and agents towards a gym like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that is set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Env:\n \"\"\"\n Doudizhu multi-agent wrapper\n \"\"\"\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[\n 20:37], 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20]}\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -\n 1], [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[\n bidding_player])\n if wr >= 0.7:\n action = {'action': 1}\n bid_limit += 1\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n 
tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards}\n card_play_data['landlord'].sort()\n player_ids = {'landlord': last_bid, 'landlord_up': (last_bid - \n 1) % 3, 'landlord_down': (last_bid + 1) % 3}\n player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):\n 'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}\n for bid_obs in bid_obs_buffer:\n bid_obs.update({'position': player_positions[bid_obs['pid']]})\n self._env.card_play_init(card_play_data)\n multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.\n array([0, 0, 1])]\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %\n 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n action = {'action': 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action\n ['action']]\n self._env.multiply_count[pos] = action['action']\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.\n total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,\n 'multiply_obs_buffer': multiply_obs_buffer}\n\n def step(self, action):\n \"\"\"\n Step function takes as input the action, which\n is a list of integers, and output the next obervation,\n reward, and a Boolean variable indicating whether the\n current game is finished. It also returns an empty\n dictionary that is reserved to pass useful information.\n \"\"\"\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {'play': {'landlord': self._get_reward('landlord'),\n 'landlord_up': self._get_reward('landlord_up'),\n 'landlord_down': self._get_reward('landlord_down')}, 'bid':\n {'landlord': self._get_reward_bidding('landlord') * 2,\n 'landlord_up': self._get_reward_bidding('landlord_up'),\n 'landlord_down': self._get_reward_bidding('landlord_down')}}\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called in the end of each\n game. 
It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n self_bomb_num = self._env.pos_bomb_num[pos]\n if winner == 'landlord':\n if self.objective == 'adp':\n return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num\n + self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (1.0 - self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[\n pos] / 4\n else:\n return 1.0 - self._env.step_count * 0.0033\n elif self.objective == 'adp':\n return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +\n self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (-1.0 + self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos\n ] / 4\n else:\n return -1.0 + self._env.step_count * 0.0033\n\n def _get_reward_bidding(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2 ** (self._env.bid_count - 1) / 8\n else:\n return -1.0 * 2 ** (self._env.bid_count - 1) / 8\n\n @property\n def _game_infoset(self):\n \"\"\"\n Here, inforset is defined as all the information\n in the current situation, incuding the hand cards\n of all the players, all the historical moves, etc.\n That is, it contains perferfect infomation. Later,\n we will use functions to extract the observable\n information from the views of the three players.\n \"\"\"\n return self._env.game_infoset\n\n @property\n def _game_bomb_num(self):\n \"\"\"\n The number of bombs played so far. This is used as\n a feature of the neural network and is also used to\n calculate ADP.\n \"\"\"\n return self._env.get_bomb_num()\n\n @property\n def _game_winner(self):\n \"\"\" A string of landlord/peasants\n \"\"\"\n return self._env.get_winner()\n\n @property\n def _acting_player_position(self):\n \"\"\"\n The player that is active. It can be landlord,\n landlod_down, or landlord_up.\n \"\"\"\n return self._env.acting_player_position\n\n @property\n def _game_over(self):\n \"\"\" Returns a Boolean\n \"\"\"\n return self._env.game_over\n\n\nclass DummyAgent(object):\n \"\"\"\n Dummy agent is designed to easily interact with the\n game engine. The agent will first be told what action\n to perform. Then the environment will call this agent\n to perform the actual action. This can help us to\n isolate environment and agents towards a gym like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that is set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\ndef get_obs(infoset, use_general=True):\n \"\"\"\n This function obtains observations with imperfect information\n from the infoset. It has three branches since we encode\n different features for different positions.\n\n This function will return dictionary named `obs`. It contains\n several fields. 
These fields will be used to train the model.\n One can play with those features to improve the performance.\n\n `position` is a string that can be landlord/landlord_down/landlord_up\n\n `x_batch` is a batch of features (excluding the hisorical moves).\n It also encodes the action feature\n\n `z_batch` is a batch of features with hisorical moves only.\n\n `legal_actions` is the legal moves\n\n `x_no_action`: the features (exluding the hitorical moves and\n the action features). It does not have the batch dim.\n\n `z`: same as z_batch but not a batch.\n \"\"\"\n if use_general:\n if infoset.player_position not in ['landlord', 'landlord_up',\n 'landlord_down']:\n raise ValueError('')\n return _get_obs_general(infoset, infoset.player_position)\n elif infoset.player_position == 'landlord':\n return _get_obs_landlord(infoset)\n elif infoset.player_position == 'landlord_up':\n return _get_obs_landlord_up(infoset)\n elif infoset.player_position == 'landlord_down':\n return _get_obs_landlord_down(infoset)\n else:\n raise ValueError('')\n\n\n<mask token>\n\n\ndef _cards2array(list_cards):\n \"\"\"\n A utility function that transforms the actions, i.e.,\n A list of integers into card matrix. Here we remove\n the six entries that are always zero and flatten the\n the representations.\n \"\"\"\n if len(list_cards) == 0:\n return np.zeros(54, dtype=np.int8)\n matrix = np.zeros([4, 13], dtype=np.int8)\n jokers = np.zeros(2, dtype=np.int8)\n counter = Counter(list_cards)\n for card, num_times in counter.items():\n if card < 20:\n matrix[:, Card2Column[card]] = NumOnes2Array[num_times]\n elif card == 20:\n jokers[0] = 1\n elif card == 30:\n jokers[1] = 1\n return np.concatenate((matrix.flatten('F'), jokers))\n\n\n<mask token>\n\n\ndef _process_action_seq(sequence, length=15, new_model=True):\n \"\"\"\n A utility function encoding historical moves. We\n encode 15 moves. If there is no 15 moves, we pad\n with zeros.\n \"\"\"\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence\n\n\ndef _get_one_hot_bomb(bomb_num):\n \"\"\"\n A utility function to encode the number of bombs\n into one-hot representation.\n \"\"\"\n one_hot = np.zeros(15)\n one_hot[bomb_num] = 1\n return one_hot\n\n\n<mask token>\n\n\ndef _get_obs_landlord_up(infoset):\n \"\"\"\n Obttain the landlord_up features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']\n )\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_down(infoset):\n \"\"\"\n Obttain the landlord_down features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.\n float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':\n infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8)}\n return obs\n\n\n<mask token>\n\n\ndef _get_obs_general(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n 
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n other_handcards_left_list = []\n for pos in ['landlord', 'landlord_up', 'landlord_up']:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n num_cards_left = np.hstack((landlord_num_cards_left,\n landlord_up_num_cards_left, landlord_down_num_cards_left))\n x_batch = np.hstack((bid_info_batch, multiply_info_batch))\n x_no_action = np.hstack((bid_info, multiply_info))\n z = np.vstack((num_cards_left, my_handcards, other_handcards,\n three_landlord_cards, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 32))))\n _z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n my_action_batch = my_action_batch[:, np.newaxis, :]\n z_batch = np.zeros([len(_z_batch), 40, 54], int)\n for i in range(0, len(_z_batch)):\n z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))\n obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\n<mask token>\n",
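The `_get_obs_*` builders in the row above all follow one pattern: every per-state feature is repeated once per legal action along a new batch axis, and each candidate action's own 54-dim card encoding is appended at the end. A minimal sketch of that batching step, assuming 1-D feature vectors and at least one legal action; the function and argument names here are illustrative, not part of the dataset row.

import numpy as np

def build_action_batch(state_features, encoded_legal_actions):
    # One copy of the fixed per-state features per candidate action,
    # mirroring the np.repeat(...[np.newaxis, :], num_legal_actions, axis=0)
    # calls in the _get_obs_* functions above.
    num_legal_actions = len(encoded_legal_actions)
    state_batch = np.repeat(state_features[np.newaxis, :],
                            num_legal_actions, axis=0)        # (N, feature_dim)
    action_batch = np.stack(encoded_legal_actions)            # (N, 54)
    # Same layout as x_batch: state features first, action encoding last.
    return np.hstack((state_batch, action_batch))             # (N, feature_dim + 54)

# Usage sketch: x_batch = build_action_batch(x_no_action,
#                                            [encode(a) for a in legal_actions])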
"step-3": "<mask token>\n\n\nclass Env:\n \"\"\"\n Doudizhu multi-agent wrapper\n \"\"\"\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[\n 20:37], 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20]}\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -\n 1], [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[\n bidding_player])\n if wr >= 0.7:\n action = {'action': 1}\n bid_limit += 1\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n 
tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards}\n card_play_data['landlord'].sort()\n player_ids = {'landlord': last_bid, 'landlord_up': (last_bid - \n 1) % 3, 'landlord_down': (last_bid + 1) % 3}\n player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):\n 'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}\n for bid_obs in bid_obs_buffer:\n bid_obs.update({'position': player_positions[bid_obs['pid']]})\n self._env.card_play_init(card_play_data)\n multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.\n array([0, 0, 1])]\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %\n 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n action = {'action': 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action\n ['action']]\n self._env.multiply_count[pos] = action['action']\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.\n total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,\n 'multiply_obs_buffer': multiply_obs_buffer}\n\n def step(self, action):\n \"\"\"\n Step function takes as input the action, which\n is a list of integers, and output the next obervation,\n reward, and a Boolean variable indicating whether the\n current game is finished. It also returns an empty\n dictionary that is reserved to pass useful information.\n \"\"\"\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {'play': {'landlord': self._get_reward('landlord'),\n 'landlord_up': self._get_reward('landlord_up'),\n 'landlord_down': self._get_reward('landlord_down')}, 'bid':\n {'landlord': self._get_reward_bidding('landlord') * 2,\n 'landlord_up': self._get_reward_bidding('landlord_up'),\n 'landlord_down': self._get_reward_bidding('landlord_down')}}\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called in the end of each\n game. 
It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n self_bomb_num = self._env.pos_bomb_num[pos]\n if winner == 'landlord':\n if self.objective == 'adp':\n return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num\n + self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (1.0 - self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[\n pos] / 4\n else:\n return 1.0 - self._env.step_count * 0.0033\n elif self.objective == 'adp':\n return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +\n self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (-1.0 + self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos\n ] / 4\n else:\n return -1.0 + self._env.step_count * 0.0033\n\n def _get_reward_bidding(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2 ** (self._env.bid_count - 1) / 8\n else:\n return -1.0 * 2 ** (self._env.bid_count - 1) / 8\n\n @property\n def _game_infoset(self):\n \"\"\"\n Here, inforset is defined as all the information\n in the current situation, incuding the hand cards\n of all the players, all the historical moves, etc.\n That is, it contains perferfect infomation. Later,\n we will use functions to extract the observable\n information from the views of the three players.\n \"\"\"\n return self._env.game_infoset\n\n @property\n def _game_bomb_num(self):\n \"\"\"\n The number of bombs played so far. This is used as\n a feature of the neural network and is also used to\n calculate ADP.\n \"\"\"\n return self._env.get_bomb_num()\n\n @property\n def _game_winner(self):\n \"\"\" A string of landlord/peasants\n \"\"\"\n return self._env.get_winner()\n\n @property\n def _acting_player_position(self):\n \"\"\"\n The player that is active. It can be landlord,\n landlod_down, or landlord_up.\n \"\"\"\n return self._env.acting_player_position\n\n @property\n def _game_over(self):\n \"\"\" Returns a Boolean\n \"\"\"\n return self._env.game_over\n\n\nclass DummyAgent(object):\n \"\"\"\n Dummy agent is designed to easily interact with the\n game engine. The agent will first be told what action\n to perform. Then the environment will call this agent\n to perform the actual action. This can help us to\n isolate environment and agents towards a gym like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that is set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\ndef get_obs(infoset, use_general=True):\n \"\"\"\n This function obtains observations with imperfect information\n from the infoset. It has three branches since we encode\n different features for different positions.\n\n This function will return dictionary named `obs`. It contains\n several fields. 
These fields will be used to train the model.\n One can play with those features to improve the performance.\n\n `position` is a string that can be landlord/landlord_down/landlord_up\n\n `x_batch` is a batch of features (excluding the hisorical moves).\n It also encodes the action feature\n\n `z_batch` is a batch of features with hisorical moves only.\n\n `legal_actions` is the legal moves\n\n `x_no_action`: the features (exluding the hitorical moves and\n the action features). It does not have the batch dim.\n\n `z`: same as z_batch but not a batch.\n \"\"\"\n if use_general:\n if infoset.player_position not in ['landlord', 'landlord_up',\n 'landlord_down']:\n raise ValueError('')\n return _get_obs_general(infoset, infoset.player_position)\n elif infoset.player_position == 'landlord':\n return _get_obs_landlord(infoset)\n elif infoset.player_position == 'landlord_up':\n return _get_obs_landlord_up(infoset)\n elif infoset.player_position == 'landlord_down':\n return _get_obs_landlord_down(infoset)\n else:\n raise ValueError('')\n\n\ndef _get_one_hot_array(num_left_cards, max_num_cards):\n \"\"\"\n A utility function to obtain one-hot endoding\n \"\"\"\n one_hot = np.zeros(max_num_cards)\n if num_left_cards > 0:\n one_hot[num_left_cards - 1] = 1\n return one_hot\n\n\ndef _cards2array(list_cards):\n \"\"\"\n A utility function that transforms the actions, i.e.,\n A list of integers into card matrix. Here we remove\n the six entries that are always zero and flatten the\n the representations.\n \"\"\"\n if len(list_cards) == 0:\n return np.zeros(54, dtype=np.int8)\n matrix = np.zeros([4, 13], dtype=np.int8)\n jokers = np.zeros(2, dtype=np.int8)\n counter = Counter(list_cards)\n for card, num_times in counter.items():\n if card < 20:\n matrix[:, Card2Column[card]] = NumOnes2Array[num_times]\n elif card == 20:\n jokers[0] = 1\n elif card == 30:\n jokers[1] = 1\n return np.concatenate((matrix.flatten('F'), jokers))\n\n\n<mask token>\n\n\ndef _process_action_seq(sequence, length=15, new_model=True):\n \"\"\"\n A utility function encoding historical moves. We\n encode 15 moves. If there is no 15 moves, we pad\n with zeros.\n \"\"\"\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence\n\n\ndef _get_one_hot_bomb(bomb_num):\n \"\"\"\n A utility function to encode the number of bombs\n into one-hot representation.\n \"\"\"\n one_hot = np.zeros(15)\n one_hot[bomb_num] = 1\n return one_hot\n\n\ndef _get_obs_landlord(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n last_action_batch, landlord_up_played_cards_batch,\n landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards, last_action,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_up(infoset):\n \"\"\"\n Obttain the landlord_up features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']\n )\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_down(infoset):\n \"\"\"\n Obttain the landlord_down features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.\n float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':\n infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_withbid(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n last_action_batch, landlord_up_played_cards_batch,\n landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards, last_action,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_general1(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n 
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n other_handcards_left_list = []\n for pos in ['landlord', 'landlord_up', 'landlord_up']:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num, bid_info, multiply_info))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_general(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': 
[0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n other_handcards_left_list = []\n for pos in ['landlord', 'landlord_up', 'landlord_up']:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n num_cards_left = np.hstack((landlord_num_cards_left,\n landlord_up_num_cards_left, landlord_down_num_cards_left))\n x_batch = np.hstack((bid_info_batch, multiply_info_batch))\n x_no_action = np.hstack((bid_info, multiply_info))\n z = np.vstack((num_cards_left, my_handcards, other_handcards,\n three_landlord_cards, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 32))))\n _z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n my_action_batch = my_action_batch[:, np.newaxis, :]\n z_batch = np.zeros([len(_z_batch), 40, 54], int)\n for i in range(0, len(_z_batch)):\n z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))\n obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef gen_bid_legal_actions(player_id, bid_info):\n self_bid_info = bid_info[:, [(player_id - 1) % 3, 
player_id, (player_id +\n 1) % 3]]\n curr_round = -1\n for r in range(4):\n if -1 in self_bid_info[r]:\n curr_round = r\n break\n bid_actions = []\n if curr_round != -1:\n self_bid_info[curr_round] = [0, 0, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n self_bid_info[curr_round] = [0, 1, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n return np.array(bid_actions)\n\n\ndef _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_info = np.array([0, 0, 0])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = bid_legal_actions\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array([])\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(2):\n my_action_batch[j, :] = _cards2array([])\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n 
landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n z_batch.astype(np.float32), 'legal_actions': bid_legal_actions,\n 'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),\n 'bid_info_batch': bid_info_batch.astype(np.int8), 'multiply_info':\n multiply_info.astype(np.int8)}\n return obs\n\n\ndef _get_obs_for_bid(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])\n x_batch = np.hstack((my_handcards_batch, bid_info_batch))\n x_no_action = np.hstack(my_handcards)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n np.array([0, 0]), 'legal_actions': bid_legal_actions, 'x_no_action':\n x_no_action.astype(np.int8), 'bid_info_batch': bid_info_batch.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 3\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n three_landlord_cards = _cards2array(landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(num_legal_actions):\n my_action_batch[j, :] = _cards2array([])\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n 
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n z_batch.astype(np.float32), 'legal_actions': multiply_info_batch,\n 'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),\n 'bid_info': bid_info.astype(np.int8), 'multiply_info_batch':\n multiply_info.astype(np.int8)}\n return obs\n",
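Every hand, played-card, and action feature in the code above is built from the same 54-dimensional card encoding: a 4x13 count matrix over ranks 3 through 2 (card value 17) plus two joker flags, flattened column-major. A standalone sketch of that encoding, mirroring the Card2Column / NumOnes2Array tables that appear in the code; the constant and function names here are illustrative only.

import numpy as np
from collections import Counter

# Ranks 3..14 plus 17 (the '2') map to the 13 matrix columns.
CARD2COLUMN = {c: i for i, c in enumerate(list(range(3, 15)) + [17])}
# A count of k copies becomes k leading ones in the 4-row column.
NUM_ONES = {0: [0, 0, 0, 0], 1: [1, 0, 0, 0], 2: [1, 1, 0, 0],
            3: [1, 1, 1, 0], 4: [1, 1, 1, 1]}

def cards_to_54(cards):
    # Returns the flattened 4x13 count matrix plus two joker flags (54 dims).
    matrix = np.zeros((4, 13), dtype=np.int8)
    jokers = np.zeros(2, dtype=np.int8)
    for card, count in Counter(cards).items():
        if card == 20:          # black joker
            jokers[0] = 1
        elif card == 30:        # red joker
            jokers[1] = 1
        else:
            matrix[:, CARD2COLUMN[card]] = NUM_ONES[count]
    return np.concatenate((matrix.flatten('F'), jokers))

# e.g. cards_to_54([3, 3, 20]) has shape (54,): column 0 holds [1, 1, 0, 0]
# and the black-joker flag is set.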
"step-4": "from collections import Counter\nimport numpy as np\nimport random\nimport torch\nimport BidModel\nfrom douzero.env.game import GameEnv\nenv_version = '3.2'\nenv_url = 'http://od.vcccz.com/hechuan/env.py'\nCard2Column = {(3): 0, (4): 1, (5): 2, (6): 3, (7): 4, (8): 5, (9): 6, (10):\n 7, (11): 8, (12): 9, (13): 10, (14): 11, (17): 12}\nNumOnes2Array = {(0): np.array([0, 0, 0, 0]), (1): np.array([1, 0, 0, 0]),\n (2): np.array([1, 1, 0, 0]), (3): np.array([1, 1, 1, 0]), (4): np.array\n ([1, 1, 1, 1])}\ndeck = []\nfor i in range(3, 15):\n deck.extend([i for _ in range(4)])\ndeck.extend([(17) for _ in range(4)])\ndeck.extend([20, 30])\n\n\nclass Env:\n \"\"\"\n Doudizhu multi-agent wrapper\n \"\"\"\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[\n 20:37], 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20]}\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -\n 1], [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[\n bidding_player])\n if wr >= 0.7:\n action = {'action': 1}\n bid_limit += 1\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = 
bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards}\n card_play_data['landlord'].sort()\n player_ids = {'landlord': last_bid, 'landlord_up': (last_bid - \n 1) % 3, 'landlord_down': (last_bid + 1) % 3}\n player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):\n 'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}\n for bid_obs in bid_obs_buffer:\n bid_obs.update({'position': player_positions[bid_obs['pid']]})\n self._env.card_play_init(card_play_data)\n multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.\n array([0, 0, 1])]\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %\n 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n action = {'action': 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action\n ['action']]\n self._env.multiply_count[pos] = action['action']\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.\n total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,\n 'multiply_obs_buffer': multiply_obs_buffer}\n\n def step(self, action):\n \"\"\"\n Step function takes as input the action, which\n is a list of integers, and output the next obervation,\n reward, and a Boolean variable indicating whether the\n current game is finished. 
It also returns an empty\n dictionary that is reserved to pass useful information.\n \"\"\"\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {'play': {'landlord': self._get_reward('landlord'),\n 'landlord_up': self._get_reward('landlord_up'),\n 'landlord_down': self._get_reward('landlord_down')}, 'bid':\n {'landlord': self._get_reward_bidding('landlord') * 2,\n 'landlord_up': self._get_reward_bidding('landlord_up'),\n 'landlord_down': self._get_reward_bidding('landlord_down')}}\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n self_bomb_num = self._env.pos_bomb_num[pos]\n if winner == 'landlord':\n if self.objective == 'adp':\n return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num\n + self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (1.0 - self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[\n pos] / 4\n else:\n return 1.0 - self._env.step_count * 0.0033\n elif self.objective == 'adp':\n return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +\n self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (-1.0 + self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos\n ] / 4\n else:\n return -1.0 + self._env.step_count * 0.0033\n\n def _get_reward_bidding(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2 ** (self._env.bid_count - 1) / 8\n else:\n return -1.0 * 2 ** (self._env.bid_count - 1) / 8\n\n @property\n def _game_infoset(self):\n \"\"\"\n Here, inforset is defined as all the information\n in the current situation, incuding the hand cards\n of all the players, all the historical moves, etc.\n That is, it contains perferfect infomation. Later,\n we will use functions to extract the observable\n information from the views of the three players.\n \"\"\"\n return self._env.game_infoset\n\n @property\n def _game_bomb_num(self):\n \"\"\"\n The number of bombs played so far. This is used as\n a feature of the neural network and is also used to\n calculate ADP.\n \"\"\"\n return self._env.get_bomb_num()\n\n @property\n def _game_winner(self):\n \"\"\" A string of landlord/peasants\n \"\"\"\n return self._env.get_winner()\n\n @property\n def _acting_player_position(self):\n \"\"\"\n The player that is active. It can be landlord,\n landlod_down, or landlord_up.\n \"\"\"\n return self._env.acting_player_position\n\n @property\n def _game_over(self):\n \"\"\" Returns a Boolean\n \"\"\"\n return self._env.game_over\n\n\nclass DummyAgent(object):\n \"\"\"\n Dummy agent is designed to easily interact with the\n game engine. The agent will first be told what action\n to perform. Then the environment will call this agent\n to perform the actual action. 
This can help us to\n isolate environment and agents towards a gym like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that is set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\ndef get_obs(infoset, use_general=True):\n \"\"\"\n This function obtains observations with imperfect information\n from the infoset. It has three branches since we encode\n different features for different positions.\n\n This function will return dictionary named `obs`. It contains\n several fields. These fields will be used to train the model.\n One can play with those features to improve the performance.\n\n `position` is a string that can be landlord/landlord_down/landlord_up\n\n `x_batch` is a batch of features (excluding the hisorical moves).\n It also encodes the action feature\n\n `z_batch` is a batch of features with hisorical moves only.\n\n `legal_actions` is the legal moves\n\n `x_no_action`: the features (exluding the hitorical moves and\n the action features). It does not have the batch dim.\n\n `z`: same as z_batch but not a batch.\n \"\"\"\n if use_general:\n if infoset.player_position not in ['landlord', 'landlord_up',\n 'landlord_down']:\n raise ValueError('')\n return _get_obs_general(infoset, infoset.player_position)\n elif infoset.player_position == 'landlord':\n return _get_obs_landlord(infoset)\n elif infoset.player_position == 'landlord_up':\n return _get_obs_landlord_up(infoset)\n elif infoset.player_position == 'landlord_down':\n return _get_obs_landlord_down(infoset)\n else:\n raise ValueError('')\n\n\ndef _get_one_hot_array(num_left_cards, max_num_cards):\n \"\"\"\n A utility function to obtain one-hot endoding\n \"\"\"\n one_hot = np.zeros(max_num_cards)\n if num_left_cards > 0:\n one_hot[num_left_cards - 1] = 1\n return one_hot\n\n\ndef _cards2array(list_cards):\n \"\"\"\n A utility function that transforms the actions, i.e.,\n A list of integers into card matrix. Here we remove\n the six entries that are always zero and flatten the\n the representations.\n \"\"\"\n if len(list_cards) == 0:\n return np.zeros(54, dtype=np.int8)\n matrix = np.zeros([4, 13], dtype=np.int8)\n jokers = np.zeros(2, dtype=np.int8)\n counter = Counter(list_cards)\n for card, num_times in counter.items():\n if card < 20:\n matrix[:, Card2Column[card]] = NumOnes2Array[num_times]\n elif card == 20:\n jokers[0] = 1\n elif card == 30:\n jokers[1] = 1\n return np.concatenate((matrix.flatten('F'), jokers))\n\n\ndef _action_seq_list2array(action_seq_list, new_model=True):\n \"\"\"\n A utility function to encode the historical moves.\n We encode the historical 15 actions. If there is\n no 15 actions, we pad the features with 0. 
Since\n three moves is a round in DouDizhu, we concatenate\n the representations for each consecutive three moves.\n Finally, we obtain a 5x162 matrix, which will be fed\n into LSTM for encoding.\n \"\"\"\n if new_model:\n position_map = {'landlord': 0, 'landlord_up': 1, 'landlord_down': 2}\n action_seq_array = np.ones((len(action_seq_list), 54)) * -1\n for row, list_cards in enumerate(action_seq_list):\n if list_cards != []:\n action_seq_array[row, :54] = _cards2array(list_cards[1])\n else:\n action_seq_array = np.zeros((len(action_seq_list), 54))\n for row, list_cards in enumerate(action_seq_list):\n if list_cards != []:\n action_seq_array[row, :] = _cards2array(list_cards[1])\n action_seq_array = action_seq_array.reshape(5, 162)\n return action_seq_array\n\n\ndef _process_action_seq(sequence, length=15, new_model=True):\n \"\"\"\n A utility function encoding historical moves. We\n encode 15 moves. If there is no 15 moves, we pad\n with zeros.\n \"\"\"\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence\n\n\ndef _get_one_hot_bomb(bomb_num):\n \"\"\"\n A utility function to encode the number of bombs\n into one-hot representation.\n \"\"\"\n one_hot = np.zeros(15)\n one_hot[bomb_num] = 1\n return one_hot\n\n\ndef _get_obs_landlord(infoset):\n \"\"\"\n Obttain the landlord features. See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n last_action_batch, landlord_up_played_cards_batch,\n landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards, last_action,\n 
landlord_up_played_cards, landlord_down_played_cards,\n landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_up(infoset):\n \"\"\"\n Obttain the landlord_up features. See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']\n )\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n 
astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_down(infoset):\n \"\"\"\n Obttain the landlord_down features. See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.\n float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':\n infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_withbid(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n last_action_batch, landlord_up_played_cards_batch,\n landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards, last_action,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_general1(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n 
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n other_handcards_left_list = []\n for pos in ['landlord', 'landlord_up', 'landlord_up']:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num, bid_info, multiply_info))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_general(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': 
[0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n other_handcards_left_list = []\n for pos in ['landlord', 'landlord_up', 'landlord_up']:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n num_cards_left = np.hstack((landlord_num_cards_left,\n landlord_up_num_cards_left, landlord_down_num_cards_left))\n x_batch = np.hstack((bid_info_batch, multiply_info_batch))\n x_no_action = np.hstack((bid_info, multiply_info))\n z = np.vstack((num_cards_left, my_handcards, other_handcards,\n three_landlord_cards, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 32))))\n _z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n my_action_batch = my_action_batch[:, np.newaxis, :]\n z_batch = np.zeros([len(_z_batch), 40, 54], int)\n for i in range(0, len(_z_batch)):\n z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))\n obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef gen_bid_legal_actions(player_id, bid_info):\n self_bid_info = bid_info[:, [(player_id - 1) % 3, 
player_id, (player_id +\n 1) % 3]]\n curr_round = -1\n for r in range(4):\n if -1 in self_bid_info[r]:\n curr_round = r\n break\n bid_actions = []\n if curr_round != -1:\n self_bid_info[curr_round] = [0, 0, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n self_bid_info[curr_round] = [0, 1, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n return np.array(bid_actions)\n\n\ndef _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_info = np.array([0, 0, 0])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = bid_legal_actions\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array([])\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(2):\n my_action_batch[j, :] = _cards2array([])\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n 
landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n z_batch.astype(np.float32), 'legal_actions': bid_legal_actions,\n 'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),\n 'bid_info_batch': bid_info_batch.astype(np.int8), 'multiply_info':\n multiply_info.astype(np.int8)}\n return obs\n\n\ndef _get_obs_for_bid(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])\n x_batch = np.hstack((my_handcards_batch, bid_info_batch))\n x_no_action = np.hstack(my_handcards)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n np.array([0, 0]), 'legal_actions': bid_legal_actions, 'x_no_action':\n x_no_action.astype(np.int8), 'bid_info_batch': bid_info_batch.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 3\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n three_landlord_cards = _cards2array(landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(num_legal_actions):\n my_action_batch[j, :] = _cards2array([])\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n 
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n z_batch.astype(np.float32), 'legal_actions': multiply_info_batch,\n 'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),\n 'bid_info': bid_info.astype(np.int8), 'multiply_info_batch':\n multiply_info.astype(np.int8)}\n return obs\n",
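Both versions of the environment source in these records encode a set of cards with the `_cards2array` helper: ranks 3 through 14 plus 17 index the 13 columns of a 4x13 count matrix, the two jokers (20 and 30) each get a single extra bit, and the flattened result is a 54-dimensional 0/1 vector (the six always-zero joker cells of a full 4x15 layout are dropped, which is why the length is 54 rather than 60). The snippet below is a minimal, self-contained sketch of that scheme; the `Card2Column` and `NumOnes2Array` tables mirror the constants that appear in the source, while the demo hand under `__main__` is purely illustrative.

# Standalone sketch of the 54-dim card encoding described above.
from collections import Counter
import numpy as np

Card2Column = {3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7,
               11: 8, 12: 9, 13: 10, 14: 11, 17: 12}
NumOnes2Array = {0: np.array([0, 0, 0, 0]),
                 1: np.array([1, 0, 0, 0]),
                 2: np.array([1, 1, 0, 0]),
                 3: np.array([1, 1, 1, 0]),
                 4: np.array([1, 1, 1, 1])}

def cards2array(list_cards):
    """Encode a list of card integers as a 54-dim 0/1 vector."""
    if len(list_cards) == 0:
        return np.zeros(54, dtype=np.int8)
    matrix = np.zeros([4, 13], dtype=np.int8)   # per-rank count, one-hot by copies held
    jokers = np.zeros(2, dtype=np.int8)         # one bit per joker
    for card, num_times in Counter(list_cards).items():
        if card < 20:
            matrix[:, Card2Column[card]] = NumOnes2Array[num_times]
        elif card == 20:
            jokers[0] = 1
        elif card == 30:
            jokers[1] = 1
    # Column-major flatten keeps the four count bits of each rank adjacent.
    return np.concatenate((matrix.flatten('F'), jokers))

if __name__ == "__main__":
    hand = [3, 3, 3, 4, 17, 20]   # hypothetical hand: three 3s, one 4, one rank-17 card, one joker
    vec = cards2array(hand)
    print(vec.shape)              # (54,)
    print(int(vec.sum()))         # 6 set bits: 3 + 1 + 1 + 1

Because the per-rank count is itself one-hot over "how many copies are held", any hand, any single move, and any played-cards history can all share this fixed 54-dimensional representation, which is what lets the observation builders above stack them with np.hstack/np.vstack.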
"step-5": "from collections import Counter\nimport numpy as np\nimport random\nimport torch\nimport BidModel\n\nfrom douzero.env.game import GameEnv\n\nenv_version = \"3.2\"\nenv_url = \"http://od.vcccz.com/hechuan/env.py\"\nCard2Column = {3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7,\n 11: 8, 12: 9, 13: 10, 14: 11, 17: 12}\n\nNumOnes2Array = {0: np.array([0, 0, 0, 0]),\n 1: np.array([1, 0, 0, 0]),\n 2: np.array([1, 1, 0, 0]),\n 3: np.array([1, 1, 1, 0]),\n 4: np.array([1, 1, 1, 1])}\n\ndeck = []\nfor i in range(3, 15):\n deck.extend([i for _ in range(4)])\ndeck.extend([17 for _ in range(4)])\ndeck.extend([20, 30])\n\n\nclass Env:\n \"\"\"\n Doudizhu multi-agent wrapper\n \"\"\"\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n\n # Initialize players\n # We use three dummy player for the target position\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n\n # Initialize the internal environment\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n\n # Randomly shuffle the deck\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20],\n 'landlord_up': _deck[20:37],\n 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20],\n }\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [\n _deck[:17],\n _deck[17:34],\n _deck[34:51],\n ]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1],\n [-1, -1, -1],\n [-1, -1, -1],\n [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n # bidding_player = 0 # debug\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward(\"bidding\", torch.tensor(bidding_obs[\"z_batch\"], device=device),\n torch.tensor(bidding_obs[\"x_batch\"], device=device), flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[bidding_player])\n if wr >= 0.7:\n action = {\"action\": 1} # debug\n bid_limit += 1\n\n bid_obs_buffer.append({\n 
\"x_batch\": bidding_obs[\"x_batch\"][action[\"action\"]],\n \"z_batch\": bidding_obs[\"z_batch\"][action[\"action\"]],\n \"pid\": bidding_player\n })\n if action[\"action\"] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward(\"bidding\", torch.tensor(bidding_obs[\"z_batch\"], device=device),\n torch.tensor(bidding_obs[\"x_batch\"], device=device), flags=flags)\n bid_obs_buffer.append({\n \"x_batch\": bidding_obs[\"x_batch\"][action[\"action\"]],\n \"z_batch\": bidding_obs[\"z_batch\"][action[\"action\"]],\n \"pid\": bidding_player\n })\n if action[\"action\"] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards,\n }\n card_play_data[\"landlord\"].sort()\n player_ids = {\n 'landlord': last_bid,\n 'landlord_up': (last_bid - 1) % 3,\n 'landlord_down': (last_bid + 1) % 3,\n }\n player_positions = {\n last_bid: 'landlord',\n (last_bid - 1) % 3: 'landlord_up',\n (last_bid + 1) % 3: 'landlord_down'\n }\n for bid_obs in bid_obs_buffer:\n bid_obs.update({\"position\": player_positions[bid_obs[\"pid\"]]})\n\n # Initialize the cards\n self._env.card_play_init(card_play_data)\n multiply_map = [\n np.array([1, 0, 0]),\n np.array([0, 1, 0]),\n np.array([0, 0, 1])\n ]\n for pos in [\"landlord\", \"landlord_up\", \"landlord_down\"]:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) % 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n # multiply_obs = _get_obs_for_multiply(pos, self._env.info_sets[pos].bid_info, card_play_data[pos],\n # landlord_cards)\n # action = model.forward(pos, torch.tensor(multiply_obs[\"z_batch\"], device=device),\n # torch.tensor(multiply_obs[\"x_batch\"], device=device), flags=flags)\n # multiply_obs_buffer.append({\n # \"x_batch\": multiply_obs[\"x_batch\"][action[\"action\"]],\n # \"z_batch\": multiply_obs[\"z_batch\"][action[\"action\"]],\n # \"position\": pos\n # })\n action = {\"action\": 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action[\"action\"]]\n self._env.multiply_count[pos] = action[\"action\"]\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print(\"发牌情况: %i/%i %.1f%%\" % (self.force_bid, self.total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {\n \"bid_obs_buffer\": bid_obs_buffer,\n \"multiply_obs_buffer\": multiply_obs_buffer\n }\n\n def step(self, action):\n \"\"\"\n Step function takes as input the action, which\n is a list of integers, and output the next obervation,\n reward, and a Boolean variable indicating whether the\n current game is finished. 
It also returns an empty\n dictionary that is reserved to pass useful information.\n \"\"\"\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {\n \"play\": {\n \"landlord\": self._get_reward(\"landlord\"),\n \"landlord_up\": self._get_reward(\"landlord_up\"),\n \"landlord_down\": self._get_reward(\"landlord_down\")\n },\n \"bid\": {\n \"landlord\": self._get_reward_bidding(\"landlord\")*2,\n \"landlord_up\": self._get_reward_bidding(\"landlord_up\"),\n \"landlord_down\": self._get_reward_bidding(\"landlord_down\")\n }\n }\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n self_bomb_num = self._env.pos_bomb_num[pos]\n if winner == 'landlord':\n if self.objective == 'adp':\n return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +self._env.multiply_count[pos]) /8\n elif self.objective == 'logadp':\n return (1.0 - self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4\n else:\n return 1.0 - self._env.step_count * 0.0033\n else:\n if self.objective == 'adp':\n return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +self._env.multiply_count[pos]) /8\n elif self.objective == 'logadp':\n return (-1.0 + self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4\n else:\n return -1.0 + self._env.step_count * 0.0033\n\n def _get_reward_bidding(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2**(self._env.bid_count-1) / 8\n else:\n return -1.0 * 2**(self._env.bid_count-1) / 8\n\n @property\n def _game_infoset(self):\n \"\"\"\n Here, inforset is defined as all the information\n in the current situation, incuding the hand cards\n of all the players, all the historical moves, etc.\n That is, it contains perferfect infomation. Later,\n we will use functions to extract the observable\n information from the views of the three players.\n \"\"\"\n return self._env.game_infoset\n\n @property\n def _game_bomb_num(self):\n \"\"\"\n The number of bombs played so far. This is used as\n a feature of the neural network and is also used to\n calculate ADP.\n \"\"\"\n return self._env.get_bomb_num()\n\n @property\n def _game_winner(self):\n \"\"\" A string of landlord/peasants\n \"\"\"\n return self._env.get_winner()\n\n @property\n def _acting_player_position(self):\n \"\"\"\n The player that is active. It can be landlord,\n landlod_down, or landlord_up.\n \"\"\"\n return self._env.acting_player_position\n\n @property\n def _game_over(self):\n \"\"\" Returns a Boolean\n \"\"\"\n return self._env.game_over\n\n\nclass DummyAgent(object):\n \"\"\"\n Dummy agent is designed to easily interact with the\n game engine. The agent will first be told what action\n to perform. Then the environment will call this agent\n to perform the actual action. 
This can help us to\n isolate environment and agents towards a gym like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that is set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\ndef get_obs(infoset, use_general=True):\n \"\"\"\n This function obtains observations with imperfect information\n from the infoset. It has three branches since we encode\n different features for different positions.\n\n This function will return dictionary named `obs`. It contains\n several fields. These fields will be used to train the model.\n One can play with those features to improve the performance.\n\n `position` is a string that can be landlord/landlord_down/landlord_up\n\n `x_batch` is a batch of features (excluding the hisorical moves).\n It also encodes the action feature\n\n `z_batch` is a batch of features with hisorical moves only.\n\n `legal_actions` is the legal moves\n\n `x_no_action`: the features (exluding the hitorical moves and\n the action features). It does not have the batch dim.\n\n `z`: same as z_batch but not a batch.\n \"\"\"\n if use_general:\n if infoset.player_position not in [\"landlord\", \"landlord_up\", \"landlord_down\"]:\n raise ValueError('')\n return _get_obs_general(infoset, infoset.player_position)\n else:\n if infoset.player_position == 'landlord':\n return _get_obs_landlord(infoset)\n elif infoset.player_position == 'landlord_up':\n return _get_obs_landlord_up(infoset)\n elif infoset.player_position == 'landlord_down':\n return _get_obs_landlord_down(infoset)\n else:\n raise ValueError('')\n\n\ndef _get_one_hot_array(num_left_cards, max_num_cards):\n \"\"\"\n A utility function to obtain one-hot endoding\n \"\"\"\n one_hot = np.zeros(max_num_cards)\n if num_left_cards > 0:\n one_hot[num_left_cards - 1] = 1\n\n return one_hot\n\n\ndef _cards2array(list_cards):\n \"\"\"\n A utility function that transforms the actions, i.e.,\n A list of integers into card matrix. Here we remove\n the six entries that are always zero and flatten the\n the representations.\n \"\"\"\n if len(list_cards) == 0:\n return np.zeros(54, dtype=np.int8)\n\n matrix = np.zeros([4, 13], dtype=np.int8)\n jokers = np.zeros(2, dtype=np.int8)\n counter = Counter(list_cards)\n for card, num_times in counter.items():\n if card < 20:\n matrix[:, Card2Column[card]] = NumOnes2Array[num_times]\n elif card == 20:\n jokers[0] = 1\n elif card == 30:\n jokers[1] = 1\n return np.concatenate((matrix.flatten('F'), jokers))\n\n\n# def _action_seq_list2array(action_seq_list):\n# \"\"\"\n# A utility function to encode the historical moves.\n# We encode the historical 15 actions. If there is\n# no 15 actions, we pad the features with 0. 
Since\n# three moves is a round in DouDizhu, we concatenate\n# the representations for each consecutive three moves.\n# Finally, we obtain a 5x162 matrix, which will be fed\n# into LSTM for encoding.\n# \"\"\"\n# action_seq_array = np.zeros((len(action_seq_list), 54))\n# for row, list_cards in enumerate(action_seq_list):\n# action_seq_array[row, :] = _cards2array(list_cards)\n# # action_seq_array = action_seq_array.reshape(5, 162)\n# return action_seq_array\n\ndef _action_seq_list2array(action_seq_list, new_model=True):\n \"\"\"\n A utility function to encode the historical moves.\n We encode the historical 15 actions. If there is\n no 15 actions, we pad the features with 0. Since\n three moves is a round in DouDizhu, we concatenate\n the representations for each consecutive three moves.\n Finally, we obtain a 5x162 matrix, which will be fed\n into LSTM for encoding.\n \"\"\"\n\n if new_model:\n position_map = {\"landlord\": 0, \"landlord_up\": 1, \"landlord_down\": 2}\n action_seq_array = np.ones((len(action_seq_list), 54)) * -1 # Default Value -1 for not using area\n for row, list_cards in enumerate(action_seq_list):\n if list_cards != []:\n action_seq_array[row, :54] = _cards2array(list_cards[1])\n else:\n action_seq_array = np.zeros((len(action_seq_list), 54))\n for row, list_cards in enumerate(action_seq_list):\n if list_cards != []:\n action_seq_array[row, :] = _cards2array(list_cards[1])\n action_seq_array = action_seq_array.reshape(5, 162)\n return action_seq_array\n\n # action_seq_array = np.zeros((len(action_seq_list), 54))\n # for row, list_cards in enumerate(action_seq_list):\n # if list_cards != []:\n # action_seq_array[row, :] = _cards2array(list_cards[1])\n # return action_seq_array\n\n\ndef _process_action_seq(sequence, length=15, new_model=True):\n \"\"\"\n A utility function encoding historical moves. We\n encode 15 moves. If there is no 15 moves, we pad\n with zeros.\n \"\"\"\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence\n\n\ndef _get_one_hot_bomb(bomb_num):\n \"\"\"\n A utility function to encode the number of bombs\n into one-hot representation.\n \"\"\"\n one_hot = np.zeros(15)\n one_hot[bomb_num] = 1\n return one_hot\n\n\ndef _get_obs_landlord(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n landlord_up_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((my_handcards_batch,\n other_handcards_batch,\n last_action_batch,\n landlord_up_played_cards_batch,\n landlord_down_played_cards_batch,\n landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch,\n bomb_num_batch,\n my_action_batch))\n x_no_action = np.hstack((my_handcards,\n other_handcards,\n last_action,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 15, False), False)\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': 'landlord',\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef _get_obs_landlord_up(infoset):\n \"\"\"\n Obttain the landlord_up features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n last_landlord_action = _cards2array(\n infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(\n last_landlord_action[np.newaxis, :],\n num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_teammate_action = _cards2array(\n infoset.last_move_dict['landlord_down'])\n last_teammate_action_batch = np.repeat(\n last_teammate_action[np.newaxis, :],\n num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n teammate_num_cards_left_batch = np.repeat(\n teammate_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n teammate_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n teammate_played_cards_batch = np.repeat(\n teammate_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((my_handcards_batch,\n other_handcards_batch,\n landlord_played_cards_batch,\n teammate_played_cards_batch,\n last_action_batch,\n last_landlord_action_batch,\n last_teammate_action_batch,\n landlord_num_cards_left_batch,\n teammate_num_cards_left_batch,\n bomb_num_batch,\n my_action_batch))\n x_no_action = np.hstack((my_handcards,\n other_handcards,\n landlord_played_cards,\n teammate_played_cards,\n last_action,\n last_landlord_action,\n last_teammate_action,\n landlord_num_cards_left,\n teammate_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 15, False), False)\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': 'landlord_up',\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef _get_obs_landlord_down(infoset):\n \"\"\"\n Obttain the landlord_down features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n last_landlord_action = _cards2array(\n infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(\n last_landlord_action[np.newaxis, :],\n num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_teammate_action = _cards2array(\n infoset.last_move_dict['landlord_up'])\n last_teammate_action_batch = np.repeat(\n last_teammate_action[np.newaxis, :],\n num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n teammate_num_cards_left_batch = np.repeat(\n teammate_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n teammate_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n teammate_played_cards_batch = np.repeat(\n teammate_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((my_handcards_batch,\n other_handcards_batch,\n landlord_played_cards_batch,\n teammate_played_cards_batch,\n last_action_batch,\n last_landlord_action_batch,\n last_teammate_action_batch,\n landlord_num_cards_left_batch,\n teammate_num_cards_left_batch,\n bomb_num_batch,\n my_action_batch))\n x_no_action = np.hstack((my_handcards,\n other_handcards,\n landlord_played_cards,\n teammate_played_cards,\n last_action,\n last_landlord_action,\n last_teammate_action,\n landlord_num_cards_left,\n teammate_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 15, False), False)\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': 'landlord_down',\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef _get_obs_landlord_withbid(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n landlord_up_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((my_handcards_batch,\n other_handcards_batch,\n last_action_batch,\n landlord_up_played_cards_batch,\n landlord_down_played_cards_batch,\n landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch,\n bomb_num_batch,\n my_action_batch))\n x_no_action = np.hstack((my_handcards,\n other_handcards,\n last_action,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 15, False), False)\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': 'landlord',\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\n\ndef _get_obs_general1(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n position_map = {\n \"landlord\": [1, 0, 0],\n \"landlord_up\": [0, 1, 0],\n \"landlord_down\": [0, 0, 1]\n }\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n 
num_legal_actions, axis=0)\n\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n landlord_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards_left_list = []\n for pos in [\"landlord\", \"landlord_up\", \"landlord_up\"]:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((position_info_batch, # 3\n my_handcards_batch, # 54\n other_handcards_batch, # 54\n three_landlord_cards_batch, # 54\n last_action_batch, # 54\n landlord_played_cards_batch, # 54\n landlord_up_played_cards_batch, # 54\n landlord_down_played_cards_batch, # 54\n landlord_num_cards_left_batch, # 20\n landlord_up_num_cards_left_batch, # 17\n landlord_down_num_cards_left_batch, # 17\n bomb_num_batch, # 15\n bid_info_batch, # 12\n multiply_info_batch, # 3\n my_action_batch)) # 54\n x_no_action = np.hstack((position_info,\n my_handcards,\n other_handcards,\n three_landlord_cards,\n last_action,\n landlord_played_cards,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_num_cards_left,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num,\n bid_info,\n multiply_info))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 32))\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': position,\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef _get_obs_general(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n 
num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n position_map = {\n \"landlord\": [1, 0, 0],\n \"landlord_up\": [0, 1, 0],\n \"landlord_down\": [0, 0, 1]\n }\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n landlord_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards_left_list = []\n for pos in [\"landlord\", \"landlord_up\", \"landlord_up\"]:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n num_cards_left = np.hstack((\n landlord_num_cards_left, # 20\n landlord_up_num_cards_left, # 17\n landlord_down_num_cards_left))\n\n x_batch = np.hstack((\n bid_info_batch, # 12\n multiply_info_batch)) # 3\n x_no_action = np.hstack((\n bid_info,\n multiply_info))\n z =np.vstack((\n num_cards_left,\n my_handcards, # 54\n other_handcards, # 54\n three_landlord_cards, # 54\n landlord_played_cards, # 54\n landlord_up_played_cards, # 54\n landlord_down_played_cards, # 54\n _action_seq_list2array(_process_action_seq(infoset.card_play_action_seq, 32))\n ))\n\n _z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n my_action_batch = my_action_batch[:,np.newaxis,:]\n z_batch = np.zeros([len(_z_batch),40,54],int)\n for i in 
range(0,len(_z_batch)):\n z_batch[i] = np.vstack((my_action_batch[i],_z_batch[i]))\n obs = {\n 'position': position,\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef gen_bid_legal_actions(player_id, bid_info):\n self_bid_info = bid_info[:, [(player_id - 1) % 3, player_id, (player_id + 1) % 3]]\n curr_round = -1\n for r in range(4):\n if -1 in self_bid_info[r]:\n curr_round = r\n break\n bid_actions = []\n if curr_round != -1:\n self_bid_info[curr_round] = [0, 0, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n self_bid_info[curr_round] = [0, 1, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n return np.array(bid_actions)\n\n\ndef _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,\n 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n position_info = np.array([0, 0, 0])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = bid_legal_actions\n\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n three_landlord_cards = _cards2array([])\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(2):\n my_action_batch[j, :] = _cards2array([])\n\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((position_info_batch,\n my_handcards_batch,\n other_handcards_batch,\n 
three_landlord_cards_batch,\n last_action_batch,\n landlord_played_cards_batch,\n landlord_up_played_cards_batch,\n landlord_down_played_cards_batch,\n landlord_num_cards_left_batch,\n landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch,\n bomb_num_batch,\n bid_info_batch,\n multiply_info_batch,\n my_action_batch))\n x_no_action = np.hstack((position_info,\n my_handcards,\n other_handcards,\n three_landlord_cards,\n last_action,\n landlord_played_cards,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_num_cards_left,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': \"\",\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': bid_legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n \"bid_info_batch\": bid_info_batch.astype(np.int8),\n \"multiply_info\": multiply_info.astype(np.int8)\n }\n return obs\n\ndef _get_obs_for_bid(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,\n 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])\n\n x_batch = np.hstack((my_handcards_batch,\n bid_info_batch))\n x_no_action = np.hstack((my_handcards))\n obs = {\n 'position': \"\",\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': np.array([0,0]),\n 'legal_actions': bid_legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n \"bid_info_batch\": bid_info_batch.astype(np.int8)\n }\n return obs\n\ndef _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,\n 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 3\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n position_map = {\n \"landlord\": [1, 0, 0],\n \"landlord_up\": [0, 1, 0],\n \"landlord_down\": [0, 0, 1]\n }\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_info = np.array(bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\n\n three_landlord_cards = _cards2array(landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n 
my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(num_legal_actions):\n my_action_batch[j, :] = _cards2array([])\n\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((position_info_batch,\n my_handcards_batch,\n other_handcards_batch,\n three_landlord_cards_batch,\n last_action_batch,\n landlord_played_cards_batch,\n landlord_up_played_cards_batch,\n landlord_down_played_cards_batch,\n landlord_num_cards_left_batch,\n landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch,\n bomb_num_batch,\n bid_info_batch,\n multiply_info_batch,\n my_action_batch))\n x_no_action = np.hstack((position_info,\n my_handcards,\n other_handcards,\n three_landlord_cards,\n last_action,\n landlord_played_cards,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_num_cards_left,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': \"\",\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': multiply_info_batch,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n \"bid_info\": bid_info.astype(np.int8),\n \"multiply_info_batch\": multiply_info.astype(np.int8)\n }\n return obs\n",
"step-ids": [
13,
24,
32,
36,
37
]
}
|
[
13,
24,
32,
36,
37
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
    def minimumTotal(self, triangle):
        """
        :type triangle: List[List[int]]
        :rtype: int
        """
        t = triangle
        if len(t) == 1:
            return t[0][0]
        # ret[j] holds the minimum path sum reaching column j of the row
        # processed so far; a single 1-D buffer is reused row by row.
        ret = [0] * len(t)
        ret[0] = t[0][0]
        for i in range(1, len(t)):
            for j in range(0, i + 1):
                if j == 0:
                    # Leftmost column: save the previous-row value (it becomes the
                    # diagonal parent of column 1), then extend straight down.
                    old_v = ret[j]
                    ret[j] += t[i][j]
                elif j == i:
                    # Rightmost column: only reachable diagonally, via the
                    # previous-row value saved in old_v.
                    ret[j] = old_v + t[i][j]
                else:
                    # Interior column: cheaper of the diagonal parent (old_v)
                    # and the parent directly above (ret[j]).
                    val = min(old_v + t[i][j], ret[j] + t[i][j])
                    old_v = ret[j]
                    ret[j] = val
        return min(ret)
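A quick usage check for the solution above; the triangle below is the classic example whose minimum top-to-bottom path (2 → 3 → 5 → 1) sums to 11:

if __name__ == "__main__":
    # Classic example triangle; expected minimum path sum is 11.
    triangle = [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]
    print(Solution().minimumTotal(triangle))  # prints 11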
|
flexible
|
{
"blob_id": "84515ef6879b54b333f9afd48c6c4b7c43ff6957",
"index": 1068,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def minimumTotal(self, triangle):\n \"\"\"\n :type triangle: List[List[int]]\n :rtype: int\n \"\"\"\n t = triangle\n if len(t) == 1:\n return t[0][0]\n ret = [0] * len(t)\n ret[0] = t[0][0]\n for i in range(1, len(t)):\n for j in range(0, i + 1):\n if j == 0:\n old_v = ret[j]\n ret[j] += t[i][j]\n elif j == i:\n ret[j] = old_v + t[i][j]\n else:\n val = min(old_v + t[i][j], ret[j] + t[i][j])\n old_v = ret[j]\n ret[j] = val\n return min(ret)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
#coding: utf-8
"""
1) Find the proper nouns in a text and return them in a list. Use regex (‘import re’) and the findall() function. In the basic version, return every word that starts with an uppercase letter.
2) Present a plot of a few seconds of the accelerometer data from the dataset:
https://archive.ics.uci.edu/ml/datasets/Activity+Recognition+from+Single+Chest-Mounted+Accelerometer#
Use the read_csv() function to open the files
"""
if __name__ == "__main__":
pass
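A minimal sketch of the two exercises described in the docstring above. The regex, the file name "1.csv", and the ~52 Hz sampling rate are assumptions (the basic "capitalized word" rule plus the UCI dataset's documented file layout), not part of the original solution:

import re

import matplotlib.pyplot as plt
import pandas as pd


def find_proper_nouns(text):
    # Basic version: any word starting with an uppercase letter counts.
    # ASCII-only; extend the character class for accented names such as "João".
    return re.findall(r'\b[A-Z][a-zA-Z]*\b', text)


def plot_accelerometer_sample(path="1.csv", seconds=5, rate=52):
    # Assumes one per-participant file of the UCI dataset, whose columns are
    # documented as: sequential number, x, y, z acceleration, activity label.
    df = pd.read_csv(path, header=None, names=["seq", "x", "y", "z", "label"])
    df[["x", "y", "z"]].head(seconds * rate).plot()
    plt.show()

# Example: find_proper_nouns("Maria and Pedro visited Lisbon last March.")
# -> ['Maria', 'Pedro', 'Lisbon', 'March']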
|
flexible
|
{
"blob_id": "d95d899c6eae5a90c90d3d920ee40b38bf304805",
"index": 532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pass\n",
"step-3": "#coding: utf-8\n\"\"\" \n1) Encontre em um texto os nomes próprios e os retorne em uma lista. Utilize o Regex (‘import re’) e a função findall(). Na versão básica, retorne todas as palavras que iniciam com maiúscula.\n\n\n2) Apresente um plot de alguns segundos dos dados de acelerômetro do dataset:\nhttps://archive.ics.uci.edu/ml/datasets/Activity+Recognition+from+Single+Chest-Mounted+Accelerometer#\nUse a função read_csv() para abrir os arquivos\n\n\"\"\"\n\nif __name__ == \"__main__\":\n\tpass",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from fastapi import APIRouter
from .endpoints import submissions
def get_api_router():
api_router = APIRouter()
api_router.include_router(submissions.router,
prefix="/submissions",
tags=["submissions"])
# api_router.include_router(users.router, prefix="/users", tags=["users"])
return api_router
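A small wiring sketch for the router factory above; the FastAPI instance, the "app.api" import path, and the "/api" prefix are illustrative assumptions rather than part of the original project:

# main.py (hypothetical entry point)
from fastapi import FastAPI

from app.api import get_api_router  # assumed package path for the module above

app = FastAPI()
app.include_router(get_api_router(), prefix="/api")
# submissions.router endpoints are now served under /api/submissions/...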
|
normal
|
{
"blob_id": "844c9af4f0d4ca33e7c69b72f9886f58ceebefdb",
"index": 2719,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router, prefix='/submissions',\n tags=['submissions'])\n return api_router\n",
"step-3": "from fastapi import APIRouter\nfrom .endpoints import submissions\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router, prefix='/submissions',\n tags=['submissions'])\n return api_router\n",
"step-4": "from fastapi import APIRouter\n\nfrom .endpoints import submissions\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router,\n prefix=\"/submissions\",\n tags=[\"submissions\"])\n # api_router.include_router(users.router, prefix=\"/users\", tags=[\"users\"])\n return api_router\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
list.clear()
for i in range(0, n):
list.append('')
tmp = input().split()
list[i] = tmp[0] + list[int(tmp[1]) - 1]
for i in range(0, k):
start = input()
print(len([word for word in list if word.startswith(start)]))
<|reserved_special_token_1|>
list = input().split()
n = int(list[0])
k = int(list[1])
list.clear()
for i in range(0, n):
list.append('')
tmp = input().split()
list[i] = tmp[0] + list[int(tmp[1]) - 1]
for i in range(0, k):
start = input()
print(len([word for word in list if word.startswith(start)]))
<|reserved_special_token_1|>
# First line: n word definitions followed by k prefix queries.
# (Note: the name `list` shadows the built-in, as in the earlier versions above.)
list = input().split()
n = int(list[0])
k = int(list[1])
list.clear()
for i in range(0, n):
    # Word i is a character prepended to an earlier word, given by a 1-based
    # index; index 0 resolves to list[-1], the just-appended empty string.
    list.append("")
    tmp = input().split()
    list[i] = tmp[0] + list[int(tmp[1])-1]
for i in range(0, k):
    # For each query, count the words that start with the given prefix.
    start = input()
    print(len([word for word in list if word.startswith(start)]))
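A worked input/output trace for the script above, assuming the format the code implies (first line: n and k; then n lines of "character parent-index", each new word being the character prepended to the referenced earlier word; then k prefix queries):

    3 2
    a 0      -> word 1 = "a"
    b 1      -> word 2 = "ba"
    a 2      -> word 3 = "aba"
    a        -> prints 2   ("a" and "aba" start with "a")
    b        -> prints 1   ("ba" starts with "b")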
|
flexible
|
{
"blob_id": "1808be09c2730af5829bb0c7c0c7cfe9f80fe84c",
"index": 7546,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlist.clear()\nfor i in range(0, n):\n list.append('')\n tmp = input().split()\n list[i] = tmp[0] + list[int(tmp[1]) - 1]\nfor i in range(0, k):\n start = input()\n print(len([word for word in list if word.startswith(start)]))\n",
"step-3": "list = input().split()\nn = int(list[0])\nk = int(list[1])\nlist.clear()\nfor i in range(0, n):\n list.append('')\n tmp = input().split()\n list[i] = tmp[0] + list[int(tmp[1]) - 1]\nfor i in range(0, k):\n start = input()\n print(len([word for word in list if word.startswith(start)]))\n",
"step-4": "list = input().split()\nn = int(list[0])\nk = int(list[1])\nlist.clear()\nfor i in range(0, n):\n list.append(\"\")\n tmp = input().split()\n list[i] = tmp[0] + list[int(tmp[1])-1]\nfor i in range(0, k):\n start = input()\n print(len([word for word in list if word.startswith(start)]))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
import numpy
import theano
from theano import tensor as T
from utils import shared_dataset
from layer import HiddenLayer, LogisticRegressionLayer
import pickle as pkl
from mlp import MLP, Costs, NeuralActivations
DEBUGGING = False
class PostMLP(MLP):
"""Post training:- Second phase MLP.
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function thanh or the
sigmoid function (defined here by a ``SigmoidalLayer`` class) while the
top layer is a softamx layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self,
input,
n_in=64*11,
n_hiddens=[500, 400],
n_out=1,
normalize_inputs=False,
use_adagrad=True,
activation=NeuralActivations.Rectifier,
exp_id=1,
rng=None,
params_first_phase=None):
"""
Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in which
the labels lie.
"""
if DEBUGGING:
theano.config.compute_test_value = 'raise'
self.input.tag.test_value = numpy.random.rand(1800, n_in)
super(PostMLP, self).__init__(input,
n_in,
n_hiddens,
n_out,
normalize_inputs,
use_adagrad,
activation,
exp_id,
rng)
self.params_first_phase = params_first_phase
def train(self,
data=None,
labels=None,
**kwargs):
learning_rate = kwargs["learning_rate"]
L1_reg = kwargs["L1_reg"]
L2_reg = kwargs["L2_reg"]
n_epochs = kwargs["nepochs"]
cost_type = kwargs["cost_type"]
save_exp_data = kwargs["save_exp_data"]
batch_size = kwargs["batch_size"]
normalize_weights = kwargs["normalize_weights"]
enable_dropout = kwargs["enable_dropout"]
if data is None:
raise Exception("Post-training can't start without pretraining class membership probabilities.")
if labels is None:
raise Exception("Post-training can not start without posttraining class labels.")
self.state = "train"
self.learning_rate = learning_rate
train_set_x = shared_dataset(data, name="training_set_x")
train_set_y = shared_dataset(labels, name="labels")
train_set_y = T.cast(train_set_y, "int32")
# compute number of minibatches for training
n_examples = data.shape[0]
n_train_batches = int(math.ceil(n_examples / batch_size))
######################
# BUILD ACTUAL MODEL #
######################
print '...postraining the model'
# allocate symbolic variables for the data
index = T.lscalar('index') # index to a [mini]batch
y = T.ivector('y') # the labels are presented as 1D vector of int32
mode = "FAST_RUN"
#import pudb; pudb.set_trace()
if DEBUGGING:
index.tag.test_value = 0
y.tag.test_value = numpy.ones(n_examples)
mode = "DEBUG_MODE"
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically.
cost = self.get_cost_function(cost_type, y, L1_reg, L2_reg)
updates = self.sgd_updates(cost, learning_rate)
        # compiling a Theano function `train_model` that returns the cost, but
        # at the same time updates the parameters of the model based on the rules
# defined in `updates`
# p_y_given_x = self.class_memberships
train_model = theano.function(inputs=[index],
outputs=cost,
updates = updates,
givens = {
self.input: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
},
mode=mode)
if DEBUGGING:
theano.printing.debugprint(train_model)
epoch = 0
costs = []
Ws = []
while (epoch < n_epochs):
print "In da epoch %d" % (epoch)
for minibatch_index in xrange(n_train_batches):
print "Postraining in Minibatch %i " % (minibatch_index)
minibatch_avg_cost = train_model(minibatch_index)
if enable_dropout:
self.dropout()
if normalize_weights:
self.normalize_weights()
costs.append(float(minibatch_avg_cost))
Ws.append(self.params[2])
epoch +=1
if save_exp_data:
self.data_dict['Ws'].append(Ws)
self.data_dict['costs'].append([costs])
self.save_data()
return costs
def test(self,
data=None,
labels=None,
**kwargs):
save_exp_data = kwargs["save_exp_data"]
batch_size = kwargs["batch_size"]
if data is None:
raise Exception("Post-training can't start without pretraining class membership probabilities.")
if labels is None:
raise Exception("Post-training can not start without posttraining class-membership probabilities.")
test_set_x = shared_dataset(data)
test_set_y = shared_dataset(labels)
test_set_y = T.cast(test_set_y, "int32")
self.state = "test"
# compute number of minibatches for training, validation and testing
n_examples = data.shape[0]
n_test_batches = int(math.ceil(n_examples / batch_size))
print '...post-testing the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
mode = "FAST_RUN"
if DEBUGGING:
theano.config.compute_test_value = 'raise'
index.tag.test_value = 0
y.tag.test_value = numpy.ones(n_examples)
mode = "DEBUG_MODE"
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
# compiling a Theano function `test_model` that returns the cost, but
        # at the same time updates the parameters of the model based on the rules
# defined in `updates`
test_model = theano.function(inputs=[index],
outputs=self.errors(y),
givens={
self.input: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]},
mode=mode)
###############
# TEST MODEL #
###############
test_losses = []
for minibatch_index in xrange(n_test_batches):
test_losses.append(float(test_model(minibatch_index)))
test_score = numpy.mean(test_losses)
print("Minibatch %i, mean test error %f" % (minibatch_index, test_score * 100))
if save_exp_data:
self.data_dict['test_scores'].append(test_losses)
self.save_data()
return test_score, test_losses
|
normal
|
{
"blob_id": "f9ea29f882c6491a2ac0007e4d9435c732d0967a",
"index": 8582,
"step-1": "import math\n\nimport numpy\nimport theano\n\nfrom theano import tensor as T\n\nfrom utils import shared_dataset\n\nfrom layer import HiddenLayer, LogisticRegressionLayer\nimport pickle as pkl\n\nfrom mlp import MLP, Costs, NeuralActivations\n\nDEBUGGING = False\n\nclass PostMLP(MLP):\n \"\"\"Post training:- Second phase MLP.\n A multilayer perceptron is a feedforward artificial neural network model\n that has one layer or more of hidden units and nonlinear activations.\n Intermediate layers usually have as activation function thanh or the\n sigmoid function (defined here by a ``SigmoidalLayer`` class) while the\n top layer is a softamx layer (defined here by a ``LogisticRegression``\n class).\n \"\"\"\n def __init__(self,\n input,\n n_in=64*11,\n n_hiddens=[500, 400],\n n_out=1,\n normalize_inputs=False,\n use_adagrad=True,\n activation=NeuralActivations.Rectifier,\n exp_id=1,\n rng=None,\n params_first_phase=None):\n \"\"\"\n Initialize the parameters for the multilayer perceptron\n\n :type rng: numpy.random.RandomState\n :param rng: a random number generator used to initialize weights\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_hidden: int\n :param n_hidden: number of hidden units\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in which\n the labels lie.\n \"\"\"\n if DEBUGGING:\n theano.config.compute_test_value = 'raise'\n self.input.tag.test_value = numpy.random.rand(1800, n_in)\n\n super(PostMLP, self).__init__(input,\n n_in,\n n_hiddens,\n n_out,\n normalize_inputs,\n use_adagrad,\n activation,\n exp_id,\n rng)\n\n self.params_first_phase = params_first_phase\n\n def train(self,\n data=None,\n labels=None,\n **kwargs):\n\n learning_rate = kwargs[\"learning_rate\"]\n L1_reg = kwargs[\"L1_reg\"]\n L2_reg = kwargs[\"L2_reg\"]\n n_epochs = kwargs[\"nepochs\"]\n cost_type = kwargs[\"cost_type\"]\n save_exp_data = kwargs[\"save_exp_data\"]\n batch_size = kwargs[\"batch_size\"]\n normalize_weights = kwargs[\"normalize_weights\"]\n enable_dropout = kwargs[\"enable_dropout\"]\n\n if data is None:\n raise Exception(\"Post-training can't start without pretraining class membership probabilities.\")\n\n if labels is None:\n raise Exception(\"Post-training can not start without posttraining class labels.\")\n\n self.state = \"train\"\n\n self.learning_rate = learning_rate\n\n train_set_x = shared_dataset(data, name=\"training_set_x\")\n train_set_y = shared_dataset(labels, name=\"labels\")\n train_set_y = T.cast(train_set_y, \"int32\")\n\n # compute number of minibatches for training\n n_examples = data.shape[0]\n n_train_batches = int(math.ceil(n_examples / batch_size))\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '...postraining the model'\n # allocate symbolic variables for the data\n index = T.lscalar('index') # index to a [mini]batch\n y = T.ivector('y') # the labels are presented as 1D vector of int32\n\n mode = \"FAST_RUN\"\n #import pudb; pudb.set_trace()\n if DEBUGGING:\n index.tag.test_value = 0\n y.tag.test_value = numpy.ones(n_examples)\n mode = \"DEBUG_MODE\"\n\n # the cost we minimize during training is the negative log likelihood of\n # the model plus the regularization terms (L1 and L2); cost is expressed\n # here symbolically.\n cost = self.get_cost_function(cost_type, y, 
L1_reg, L2_reg)\n updates = self.sgd_updates(cost, learning_rate)\n\n # compiling a Theano function `train_model` that returns the cost, butx\n # in the same time updates the parameter of the model based on the rules\n # defined in `updates`\n # p_y_given_x = self.class_memberships\n train_model = theano.function(inputs=[index],\n outputs=cost,\n updates = updates,\n givens = {\n self.input: train_set_x[index * batch_size:(index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n },\n mode=mode)\n\n if DEBUGGING:\n theano.printing.debugprint(train_model)\n\n epoch = 0\n costs = []\n Ws = []\n\n while (epoch < n_epochs):\n print \"In da epoch %d\" % (epoch)\n for minibatch_index in xrange(n_train_batches):\n print \"Postraining in Minibatch %i \" % (minibatch_index)\n minibatch_avg_cost = train_model(minibatch_index)\n if enable_dropout:\n self.dropout()\n\n if normalize_weights:\n self.normalize_weights()\n\n costs.append(float(minibatch_avg_cost))\n Ws.append(self.params[2])\n epoch +=1\n\n if save_exp_data:\n self.data_dict['Ws'].append(Ws)\n self.data_dict['costs'].append([costs])\n self.save_data()\n return costs\n\n def test(self,\n data=None,\n labels=None,\n **kwargs):\n\n save_exp_data = kwargs[\"save_exp_data\"]\n batch_size = kwargs[\"batch_size\"]\n\n if data is None:\n raise Exception(\"Post-training can't start without pretraining class membership probabilities.\")\n\n if labels is None:\n raise Exception(\"Post-training can not start without posttraining class-membership probabilities.\")\n\n test_set_x = shared_dataset(data)\n test_set_y = shared_dataset(labels)\n test_set_y = T.cast(test_set_y, \"int32\")\n\n self.state = \"test\"\n\n # compute number of minibatches for training, validation and testing\n n_examples = data.shape[0]\n n_test_batches = int(math.ceil(n_examples / batch_size))\n\n print '...post-testing the model'\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n\n y = T.ivector('y') # the labels are presented as 1D vector of\n # [int] labels\n\n mode = \"FAST_RUN\"\n if DEBUGGING:\n theano.config.compute_test_value = 'raise'\n index.tag.test_value = 0\n y.tag.test_value = numpy.ones(n_examples)\n mode = \"DEBUG_MODE\"\n\n # the cost we minimize during training is the negative log likelihood of\n # the model plus the regularization terms (L1 and L2); cost is expressed\n # here symbolically\n\n # compiling a Theano function `test_model` that returns the cost, but\n # in the same time updates the parameter of the model based on the rules\n # defined in `updates`\n\n test_model = theano.function(inputs=[index],\n outputs=self.errors(y),\n givens={\n self.input: test_set_x[index * batch_size:(index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]},\n mode=mode)\n\n ###############\n # TEST MODEL #\n ###############\n\n test_losses = []\n\n for minibatch_index in xrange(n_test_batches):\n test_losses.append(float(test_model(minibatch_index)))\n test_score = numpy.mean(test_losses)\n print(\"Minibatch %i, mean test error %f\" % (minibatch_index, test_score * 100))\n\n if save_exp_data:\n self.data_dict['test_scores'].append(test_losses)\n self.save_data()\n\n return test_score, test_losses\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def page_html(requested_url):
try:
headers = {'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'
, 'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
request_obj = Request(url=requested_url, headers=headers)
opened_url = urlopen(request_obj)
page_html = opened_url.read()
opened_url.close()
return page_html
except Exception as e:
pass
<|reserved_special_token_0|>
def max_num_jobs(page_html):
page_soup = soup(page_html, 'html.parser')
max_ = page_soup.find('p', {'class': 'jobsCount'})
return max_.get_text()
<|reserved_special_token_0|>
def jobpage_scrape(extracted_link, page_html):
jobpage_info = {}
page_soup = soup(page_html, 'html.parser')
try:
jobpage_info['job_link'] = extracted_link
except Exception as e:
jobpage_info['job_link'] = None
try:
job_title = page_soup.find('div', {'class': 'jobViewJobTitleWrap'})
jobpage_info['job_title'] = job_title.get_text()
except Exception as e:
jobpage_info['job_title'] = None
try:
sum_col = page_soup.find('div', {'class': 'summaryColumn'})
summary_column = sum_col.get_text()
summary_column = summary_column.replace('\xa0–\xa0', ' ')
jobpage_info['summary_column'] = summary_column
except Exception as e:
jobpage_info['summary_column'] = None
try:
j_d = page_soup.find('div', {'class': 'jobDescriptionContent desc'})
job_desc = j_d.get_text()
pattern = '\n' + '{2,}'
job_desc = re.sub(pattern, '\n', job_desc)
job_desc = job_desc.replace('\n', ' ')
jobpage_info['job_description'] = job_desc
except Exception as e:
jobpage_info['job_description'] = None
return jobpage_info
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def page_html(requested_url):
try:
headers = {'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'
, 'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
request_obj = Request(url=requested_url, headers=headers)
opened_url = urlopen(request_obj)
page_html = opened_url.read()
opened_url.close()
return page_html
except Exception as e:
pass
<|reserved_special_token_0|>
def max_num_jobs(page_html):
page_soup = soup(page_html, 'html.parser')
max_ = page_soup.find('p', {'class': 'jobsCount'})
return max_.get_text()
<|reserved_special_token_0|>
def jobpage_scrape(extracted_link, page_html):
jobpage_info = {}
page_soup = soup(page_html, 'html.parser')
try:
jobpage_info['job_link'] = extracted_link
except Exception as e:
jobpage_info['job_link'] = None
try:
job_title = page_soup.find('div', {'class': 'jobViewJobTitleWrap'})
jobpage_info['job_title'] = job_title.get_text()
except Exception as e:
jobpage_info['job_title'] = None
try:
sum_col = page_soup.find('div', {'class': 'summaryColumn'})
summary_column = sum_col.get_text()
summary_column = summary_column.replace('\xa0–\xa0', ' ')
jobpage_info['summary_column'] = summary_column
except Exception as e:
jobpage_info['summary_column'] = None
try:
j_d = page_soup.find('div', {'class': 'jobDescriptionContent desc'})
job_desc = j_d.get_text()
pattern = '\n' + '{2,}'
job_desc = re.sub(pattern, '\n', job_desc)
job_desc = job_desc.replace('\n', ' ')
jobpage_info['job_description'] = job_desc
except Exception as e:
jobpage_info['job_description'] = None
return jobpage_info
<|reserved_special_token_0|>
def write_to_file(jobpage_info):
with open('output.csv', 'a', newline='', encoding='utf-8') as f:
try:
writer = csv.writer(f)
writer.writerow(jobpage_info.values())
except Exception as e:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def page_html(requested_url):
try:
headers = {'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'
, 'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
request_obj = Request(url=requested_url, headers=headers)
opened_url = urlopen(request_obj)
page_html = opened_url.read()
opened_url.close()
return page_html
except Exception as e:
pass
<|reserved_special_token_0|>
def max_num_jobs(page_html):
page_soup = soup(page_html, 'html.parser')
max_ = page_soup.find('p', {'class': 'jobsCount'})
return max_.get_text()
<|reserved_special_token_0|>
def get_listing_links(page_html):
try:
obj_links = {}
id_temp_dict = {}
page_soup = soup(page_html, 'html.parser')
results = page_soup.findAll('ul', {'class': 'jlGrid hover'})
for result in results:
links = result.findAll('a')
for a in links:
formatted_link = 'http://www.glassdoor.sg' + a['href']
id_temp = formatted_link[-10:]
if id_temp not in id_temp_dict.keys():
id_temp_dict[id_temp] = None
obj_links[formatted_link] = None
return list(obj_links.keys())
except Exception as e:
pass
<|reserved_special_token_0|>
def jobpage_scrape(extracted_link, page_html):
jobpage_info = {}
page_soup = soup(page_html, 'html.parser')
try:
jobpage_info['job_link'] = extracted_link
except Exception as e:
jobpage_info['job_link'] = None
try:
job_title = page_soup.find('div', {'class': 'jobViewJobTitleWrap'})
jobpage_info['job_title'] = job_title.get_text()
except Exception as e:
jobpage_info['job_title'] = None
try:
sum_col = page_soup.find('div', {'class': 'summaryColumn'})
summary_column = sum_col.get_text()
summary_column = summary_column.replace('\xa0–\xa0', ' ')
jobpage_info['summary_column'] = summary_column
except Exception as e:
jobpage_info['summary_column'] = None
try:
j_d = page_soup.find('div', {'class': 'jobDescriptionContent desc'})
job_desc = j_d.get_text()
pattern = '\n' + '{2,}'
job_desc = re.sub(pattern, '\n', job_desc)
job_desc = job_desc.replace('\n', ' ')
jobpage_info['job_description'] = job_desc
except Exception as e:
jobpage_info['job_description'] = None
return jobpage_info
<|reserved_special_token_0|>
def write_to_file(jobpage_info):
with open('output.csv', 'a', newline='', encoding='utf-8') as f:
try:
writer = csv.writer(f)
writer.writerow(jobpage_info.values())
except Exception as e:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import re
import csv
import os
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup as soup
<|reserved_special_token_0|>
def page_html(requested_url):
try:
headers = {'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'
, 'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
request_obj = Request(url=requested_url, headers=headers)
opened_url = urlopen(request_obj)
page_html = opened_url.read()
opened_url.close()
return page_html
except Exception as e:
pass
<|reserved_special_token_0|>
def max_num_jobs(page_html):
page_soup = soup(page_html, 'html.parser')
max_ = page_soup.find('p', {'class': 'jobsCount'})
return max_.get_text()
<|reserved_special_token_0|>
def get_listing_links(page_html):
try:
obj_links = {}
id_temp_dict = {}
page_soup = soup(page_html, 'html.parser')
results = page_soup.findAll('ul', {'class': 'jlGrid hover'})
for result in results:
links = result.findAll('a')
for a in links:
formatted_link = 'http://www.glassdoor.sg' + a['href']
id_temp = formatted_link[-10:]
if id_temp not in id_temp_dict.keys():
id_temp_dict[id_temp] = None
obj_links[formatted_link] = None
return list(obj_links.keys())
except Exception as e:
pass
<|reserved_special_token_0|>
def jobpage_scrape(extracted_link, page_html):
jobpage_info = {}
page_soup = soup(page_html, 'html.parser')
try:
jobpage_info['job_link'] = extracted_link
except Exception as e:
jobpage_info['job_link'] = None
try:
job_title = page_soup.find('div', {'class': 'jobViewJobTitleWrap'})
jobpage_info['job_title'] = job_title.get_text()
except Exception as e:
jobpage_info['job_title'] = None
try:
sum_col = page_soup.find('div', {'class': 'summaryColumn'})
summary_column = sum_col.get_text()
summary_column = summary_column.replace('\xa0–\xa0', ' ')
jobpage_info['summary_column'] = summary_column
except Exception as e:
jobpage_info['summary_column'] = None
try:
j_d = page_soup.find('div', {'class': 'jobDescriptionContent desc'})
job_desc = j_d.get_text()
pattern = '\n' + '{2,}'
job_desc = re.sub(pattern, '\n', job_desc)
job_desc = job_desc.replace('\n', ' ')
jobpage_info['job_description'] = job_desc
except Exception as e:
jobpage_info['job_description'] = None
return jobpage_info
<|reserved_special_token_0|>
def write_to_file(jobpage_info):
with open('output.csv', 'a', newline='', encoding='utf-8') as f:
try:
writer = csv.writer(f)
writer.writerow(jobpage_info.values())
except Exception as e:
pass
<|reserved_special_token_1|>
'''
Import necessary libraries
'''
import re
import csv
import os
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup as soup
'''
Function to request page html from given URL
'''
def page_html(requested_url):
try:
# define headers to be provided for request authentication
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.11 (KHTML, like Gecko) '
'Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
# make request, read request object to get page html and return it.
request_obj = Request(url = requested_url, headers = headers)
opened_url = urlopen(request_obj)
page_html = opened_url.read()
opened_url.close()
return page_html
except Exception as e:
# print(e)
pass
'''
Function to acquire the maximum number of jobs (only applicable to the base/first results-page html)
'''
def max_num_jobs(page_html):
page_soup = soup(page_html, "html.parser")
max_ = page_soup.find("p", {"class": "jobsCount"})
return(max_.get_text())
'''
Function to return a list of job page links from a given html page
'''
def get_listing_links(page_html):
try:
# use of dictionary to make sure that there are no duplicates
obj_links = {}
id_temp_dict = {}
page_soup = soup(page_html, "html.parser")
        # grab all job-listing <ul class="jlGrid hover"> elements from the results page
results = page_soup.findAll("ul", {"class": "jlGrid hover"})
for result in results:
links = result.findAll('a')
for a in links:
formatted_link = "http://www.glassdoor.sg" + a['href']
id_temp = formatted_link[-10:]
if id_temp not in id_temp_dict.keys():
id_temp_dict[id_temp] = None
obj_links[formatted_link] = None
return list(obj_links.keys())
except Exception as e:
# print(e)
pass
'''
Function to return a dictionary of scraped information from a single job page link
'''
def jobpage_scrape(extracted_link, page_html):
jobpage_info = {}
page_soup = soup(page_html, "html.parser")
try:
jobpage_info['job_link'] = extracted_link
except Exception as e:
# print(e)
jobpage_info['job_link'] = None
try:
job_title = page_soup.find("div", {"class": "jobViewJobTitleWrap"})
jobpage_info['job_title'] = job_title.get_text()
except Exception as e:
# print(e)
jobpage_info['job_title'] = None
try:
sum_col = page_soup.find("div", {"class": "summaryColumn"})
summary_column = sum_col.get_text()
summary_column = summary_column.replace("\xa0–\xa0", ' ')
jobpage_info['summary_column'] = summary_column
except Exception as e:
# print(e)
jobpage_info['summary_column'] = None
try:
j_d = page_soup.find("div", {"class": "jobDescriptionContent desc"})
job_desc = j_d.get_text()
pattern = '\n' + '{2,}'
job_desc = re.sub(pattern, '\n', job_desc)
job_desc = job_desc.replace('\n', " ")
jobpage_info['job_description'] = job_desc
except Exception as e:
# print(e)
jobpage_info['job_description'] = None
return jobpage_info
'''
Function to write a dictionary of scraped information onto a csv file
'''
def write_to_file(jobpage_info):
with open('output.csv', 'a', newline='', encoding="utf-8") as f:
try:
writer = csv.writer(f)
writer.writerow(jobpage_info.values())
except Exception as e:
# print(e)
pass
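A minimal driver sketch chaining the helpers above; the search URL is illustrative only, and Glassdoor's markup may have changed since these selectors were written:

if __name__ == "__main__":
    # Hypothetical Glassdoor search-results URL; substitute a real listing page.
    base_url = "https://www.glassdoor.sg/Job/singapore-python-jobs-SRCH_IL.0,9_IC3235921_KO10,16.htm"
    listing_html = page_html(base_url)
    if listing_html:
        print(max_num_jobs(listing_html))
        for link in get_listing_links(listing_html) or []:
            job_html = page_html(link)
            if job_html:
                write_to_file(jobpage_scrape(link, job_html))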
|
flexible
|
{
"blob_id": "5bfb7fc60ddf4f6ad6d89771eb0a8903b04da3d9",
"index": 6187,
"step-1": "<mask token>\n\n\ndef page_html(requested_url):\n try:\n headers = {'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n request_obj = Request(url=requested_url, headers=headers)\n opened_url = urlopen(request_obj)\n page_html = opened_url.read()\n opened_url.close()\n return page_html\n except Exception as e:\n pass\n\n\n<mask token>\n\n\ndef max_num_jobs(page_html):\n page_soup = soup(page_html, 'html.parser')\n max_ = page_soup.find('p', {'class': 'jobsCount'})\n return max_.get_text()\n\n\n<mask token>\n\n\ndef jobpage_scrape(extracted_link, page_html):\n jobpage_info = {}\n page_soup = soup(page_html, 'html.parser')\n try:\n jobpage_info['job_link'] = extracted_link\n except Exception as e:\n jobpage_info['job_link'] = None\n try:\n job_title = page_soup.find('div', {'class': 'jobViewJobTitleWrap'})\n jobpage_info['job_title'] = job_title.get_text()\n except Exception as e:\n jobpage_info['job_title'] = None\n try:\n sum_col = page_soup.find('div', {'class': 'summaryColumn'})\n summary_column = sum_col.get_text()\n summary_column = summary_column.replace('\\xa0–\\xa0', ' ')\n jobpage_info['summary_column'] = summary_column\n except Exception as e:\n jobpage_info['summary_column'] = None\n try:\n j_d = page_soup.find('div', {'class': 'jobDescriptionContent desc'})\n job_desc = j_d.get_text()\n pattern = '\\n' + '{2,}'\n job_desc = re.sub(pattern, '\\n', job_desc)\n job_desc = job_desc.replace('\\n', ' ')\n jobpage_info['job_description'] = job_desc\n except Exception as e:\n jobpage_info['job_description'] = None\n return jobpage_info\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef page_html(requested_url):\n try:\n headers = {'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n request_obj = Request(url=requested_url, headers=headers)\n opened_url = urlopen(request_obj)\n page_html = opened_url.read()\n opened_url.close()\n return page_html\n except Exception as e:\n pass\n\n\n<mask token>\n\n\ndef max_num_jobs(page_html):\n page_soup = soup(page_html, 'html.parser')\n max_ = page_soup.find('p', {'class': 'jobsCount'})\n return max_.get_text()\n\n\n<mask token>\n\n\ndef jobpage_scrape(extracted_link, page_html):\n jobpage_info = {}\n page_soup = soup(page_html, 'html.parser')\n try:\n jobpage_info['job_link'] = extracted_link\n except Exception as e:\n jobpage_info['job_link'] = None\n try:\n job_title = page_soup.find('div', {'class': 'jobViewJobTitleWrap'})\n jobpage_info['job_title'] = job_title.get_text()\n except Exception as e:\n jobpage_info['job_title'] = None\n try:\n sum_col = page_soup.find('div', {'class': 'summaryColumn'})\n summary_column = sum_col.get_text()\n summary_column = summary_column.replace('\\xa0–\\xa0', ' ')\n jobpage_info['summary_column'] = summary_column\n except Exception as e:\n jobpage_info['summary_column'] = None\n try:\n j_d = page_soup.find('div', {'class': 'jobDescriptionContent desc'})\n job_desc = j_d.get_text()\n pattern = '\\n' + '{2,}'\n job_desc = re.sub(pattern, '\\n', job_desc)\n job_desc = job_desc.replace('\\n', ' ')\n jobpage_info['job_description'] = job_desc\n except Exception as e:\n jobpage_info['job_description'] = None\n return jobpage_info\n\n\n<mask token>\n\n\ndef write_to_file(jobpage_info):\n with open('output.csv', 'a', newline='', encoding='utf-8') as f:\n try:\n writer = csv.writer(f)\n writer.writerow(jobpage_info.values())\n except Exception as e:\n pass\n",
"step-3": "<mask token>\n\n\ndef page_html(requested_url):\n try:\n headers = {'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n request_obj = Request(url=requested_url, headers=headers)\n opened_url = urlopen(request_obj)\n page_html = opened_url.read()\n opened_url.close()\n return page_html\n except Exception as e:\n pass\n\n\n<mask token>\n\n\ndef max_num_jobs(page_html):\n page_soup = soup(page_html, 'html.parser')\n max_ = page_soup.find('p', {'class': 'jobsCount'})\n return max_.get_text()\n\n\n<mask token>\n\n\ndef get_listing_links(page_html):\n try:\n obj_links = {}\n id_temp_dict = {}\n page_soup = soup(page_html, 'html.parser')\n results = page_soup.findAll('ul', {'class': 'jlGrid hover'})\n for result in results:\n links = result.findAll('a')\n for a in links:\n formatted_link = 'http://www.glassdoor.sg' + a['href']\n id_temp = formatted_link[-10:]\n if id_temp not in id_temp_dict.keys():\n id_temp_dict[id_temp] = None\n obj_links[formatted_link] = None\n return list(obj_links.keys())\n except Exception as e:\n pass\n\n\n<mask token>\n\n\ndef jobpage_scrape(extracted_link, page_html):\n jobpage_info = {}\n page_soup = soup(page_html, 'html.parser')\n try:\n jobpage_info['job_link'] = extracted_link\n except Exception as e:\n jobpage_info['job_link'] = None\n try:\n job_title = page_soup.find('div', {'class': 'jobViewJobTitleWrap'})\n jobpage_info['job_title'] = job_title.get_text()\n except Exception as e:\n jobpage_info['job_title'] = None\n try:\n sum_col = page_soup.find('div', {'class': 'summaryColumn'})\n summary_column = sum_col.get_text()\n summary_column = summary_column.replace('\\xa0–\\xa0', ' ')\n jobpage_info['summary_column'] = summary_column\n except Exception as e:\n jobpage_info['summary_column'] = None\n try:\n j_d = page_soup.find('div', {'class': 'jobDescriptionContent desc'})\n job_desc = j_d.get_text()\n pattern = '\\n' + '{2,}'\n job_desc = re.sub(pattern, '\\n', job_desc)\n job_desc = job_desc.replace('\\n', ' ')\n jobpage_info['job_description'] = job_desc\n except Exception as e:\n jobpage_info['job_description'] = None\n return jobpage_info\n\n\n<mask token>\n\n\ndef write_to_file(jobpage_info):\n with open('output.csv', 'a', newline='', encoding='utf-8') as f:\n try:\n writer = csv.writer(f)\n writer.writerow(jobpage_info.values())\n except Exception as e:\n pass\n",
"step-4": "<mask token>\nimport re\nimport csv\nimport os\nfrom urllib.request import urlopen, Request\nfrom bs4 import BeautifulSoup as soup\n<mask token>\n\n\ndef page_html(requested_url):\n try:\n headers = {'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n request_obj = Request(url=requested_url, headers=headers)\n opened_url = urlopen(request_obj)\n page_html = opened_url.read()\n opened_url.close()\n return page_html\n except Exception as e:\n pass\n\n\n<mask token>\n\n\ndef max_num_jobs(page_html):\n page_soup = soup(page_html, 'html.parser')\n max_ = page_soup.find('p', {'class': 'jobsCount'})\n return max_.get_text()\n\n\n<mask token>\n\n\ndef get_listing_links(page_html):\n try:\n obj_links = {}\n id_temp_dict = {}\n page_soup = soup(page_html, 'html.parser')\n results = page_soup.findAll('ul', {'class': 'jlGrid hover'})\n for result in results:\n links = result.findAll('a')\n for a in links:\n formatted_link = 'http://www.glassdoor.sg' + a['href']\n id_temp = formatted_link[-10:]\n if id_temp not in id_temp_dict.keys():\n id_temp_dict[id_temp] = None\n obj_links[formatted_link] = None\n return list(obj_links.keys())\n except Exception as e:\n pass\n\n\n<mask token>\n\n\ndef jobpage_scrape(extracted_link, page_html):\n jobpage_info = {}\n page_soup = soup(page_html, 'html.parser')\n try:\n jobpage_info['job_link'] = extracted_link\n except Exception as e:\n jobpage_info['job_link'] = None\n try:\n job_title = page_soup.find('div', {'class': 'jobViewJobTitleWrap'})\n jobpage_info['job_title'] = job_title.get_text()\n except Exception as e:\n jobpage_info['job_title'] = None\n try:\n sum_col = page_soup.find('div', {'class': 'summaryColumn'})\n summary_column = sum_col.get_text()\n summary_column = summary_column.replace('\\xa0–\\xa0', ' ')\n jobpage_info['summary_column'] = summary_column\n except Exception as e:\n jobpage_info['summary_column'] = None\n try:\n j_d = page_soup.find('div', {'class': 'jobDescriptionContent desc'})\n job_desc = j_d.get_text()\n pattern = '\\n' + '{2,}'\n job_desc = re.sub(pattern, '\\n', job_desc)\n job_desc = job_desc.replace('\\n', ' ')\n jobpage_info['job_description'] = job_desc\n except Exception as e:\n jobpage_info['job_description'] = None\n return jobpage_info\n\n\n<mask token>\n\n\ndef write_to_file(jobpage_info):\n with open('output.csv', 'a', newline='', encoding='utf-8') as f:\n try:\n writer = csv.writer(f)\n writer.writerow(jobpage_info.values())\n except Exception as e:\n pass\n",
"step-5": "'''\r\nImport necessary libraries\r\n'''\r\nimport re\r\nimport csv\r\nimport os\r\nfrom urllib.request import urlopen, Request\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\n'''\r\nFunction to request page html from given URL\r\n'''\r\ndef page_html(requested_url):\r\n try:\r\n # define headers to be provided for request authentication\r\n headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) ' \r\n 'AppleWebKit/537.11 (KHTML, like Gecko) '\r\n 'Chrome/23.0.1271.64 Safari/537.11',\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\r\n 'Accept-Encoding': 'none',\r\n 'Accept-Language': 'en-US,en;q=0.8',\r\n 'Connection': 'keep-alive'}\r\n # make request, read request object to get page html and return it.\r\n request_obj = Request(url = requested_url, headers = headers)\r\n opened_url = urlopen(request_obj)\r\n page_html = opened_url.read()\r\n opened_url.close()\r\n return page_html\r\n except Exception as e:\r\n # print(e)\r\n pass\r\n\r\n'''\r\nFunction to acquire the maximum number of jobs (only applicable for the base/ first html)\r\n'''\r\ndef max_num_jobs(page_html):\r\n page_soup = soup(page_html, \"html.parser\")\r\n max_ = page_soup.find(\"p\", {\"class\": \"jobsCount\"})\r\n return(max_.get_text())\r\n\r\n'''\r\nFunction to return a list of job page links from a given html page\r\n'''\r\ndef get_listing_links(page_html):\r\n try:\r\n # use of dictionary to make sure that there are no duplicates\r\n obj_links = {}\r\n id_temp_dict = {}\r\n page_soup = soup(page_html, \"html.parser\")\r\n #grab all divs with a class of result\r\n results = page_soup.findAll(\"ul\", {\"class\": \"jlGrid hover\"})\r\n for result in results:\r\n links = result.findAll('a')\r\n for a in links:\r\n formatted_link = \"http://www.glassdoor.sg\" + a['href']\r\n id_temp = formatted_link[-10:]\r\n if id_temp not in id_temp_dict.keys():\r\n id_temp_dict[id_temp] = None\r\n obj_links[formatted_link] = None\r\n return list(obj_links.keys())\r\n except Exception as e:\r\n # print(e)\r\n pass\r\n\r\n'''\r\nFunction to return a dictionary of scrapped information from a single job page link \r\n'''\r\ndef jobpage_scrape(extracted_link, page_html):\r\n jobpage_info = {}\r\n page_soup = soup(page_html, \"html.parser\")\r\n try:\r\n jobpage_info['job_link'] = extracted_link\r\n except Exception as e:\r\n # print(e)\r\n jobpage_info['job_link'] = None\r\n\r\n try:\r\n job_title = page_soup.find(\"div\", {\"class\": \"jobViewJobTitleWrap\"})\r\n jobpage_info['job_title'] = job_title.get_text()\r\n except Exception as e:\r\n # print(e)\r\n jobpage_info['job_title'] = None\r\n\r\n try:\r\n sum_col = page_soup.find(\"div\", {\"class\": \"summaryColumn\"})\r\n summary_column = sum_col.get_text()\r\n summary_column = summary_column.replace(\"\\xa0–\\xa0\", ' ')\r\n jobpage_info['summary_column'] = summary_column\r\n except Exception as e:\r\n # print(e)\r\n jobpage_info['summary_column'] = None\r\n\r\n try:\r\n j_d = page_soup.find(\"div\", {\"class\": \"jobDescriptionContent desc\"})\r\n job_desc = j_d.get_text()\r\n pattern = '\\n' + '{2,}'\r\n job_desc = re.sub(pattern, '\\n', job_desc)\r\n job_desc = job_desc.replace('\\n', \" \")\r\n jobpage_info['job_description'] = job_desc\r\n except Exception as e:\r\n # print(e)\r\n jobpage_info['job_description'] = None\r\n\r\n return jobpage_info\r\n\r\n'''\r\nFunction to write a dictionary of scrapped information onto a csv file\r\n'''\r\ndef write_to_file(jobpage_info):\r\n 
with open('output.csv', 'a', newline='', encoding=\"utf-8\") as f:\r\n try:\r\n writer = csv.writer(f)\r\n writer.writerow(jobpage_info.values())\r\n except Exception as e:\r\n # print(e)\r\n pass",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import sys
import random
#import matplotlib.pyplot as plt
import numpy as np
import time
class Waterfilling:
"""
initializes x and r with optimal flow allocations
and link fair share rates for traffic matrix routes and link
capacities c, and level with number of levels
after running the waterfilling algorithm. note
that if sum of flow allocations at a link is less than capacity
then fair share of link is float('inf').
    note that routes and c must be initialized before calling this.
"""
def __init__(self, routes, c, log, prec_library):
#log = True
#print "Waterfilling"
#print mpmath.mp
(self.num_flows, self.num_links) = routes.shape
self.levels = np.ones((self.num_links, 1)) * float('inf')
self.prec_library = prec_library
eps = prec_library.eps1
weights = np.ones((self.num_flows,1))
#print("weights", weights.shape, weights)
#print("routes", routes.shape, routes)
#self.r = np.ones((self.num_links,1)) * mpf_inf
#self.x = np.ones((self.num_flows,1)) * mpf_inf
x = np.zeros((self.num_flows,1))
active_flows = np.ones((self.num_flows, 1), dtype=bool)
rem_cap = c #np.ones((self.num_links, 1)) * prec_library.mpf_one
# for i in range(self.num_links):
# rem_cap[i] = prec_library.mpf(c[i,0])
self.max_level = 0
num_active_flows = np.count_nonzero(active_flows, axis=0)
#print(num_active_flows,"flows left")
while num_active_flows > 0:
# number of rem flows on all links
link_weights = np.dot(routes.T, weights)
assert(rem_cap.shape == link_weights.shape)
try:
fair_shares = np.where(link_weights>0, rem_cap/link_weights, float('inf'))
except:
pass
#print("link_weights", link_weights)
#print("rem_cap", rem_cap)
#print("fair_shares", fair_shares)
fair_shares.reshape(self.num_links, 1)
bl = np.argmin(fair_shares)
#print ("bl",type(bl),bl)
inc = float(fair_shares[bl, 0])
assert(inc < float('inf'))
# increase level, only when link with smallest fair share rate
# has a rate larger than last one, handles the following example
# two links, each cap 10.0, each has one flow, and none in common
# each link identified in different iterations of this loop
if self.max_level == 0 or inc > eps: self.max_level += 1
x = np.where(active_flows, x + inc * weights, x)
if log:
print "In round",self.max_level,\
" link", bl, "has smallest fair share", inc, "b/s",\
"Next rate increase is", inc, " (type ", type(inc), ") cuz of bl ",\
bl, " with rem_cap ", rem_cap[bl,0], " b/s",\
"and ", link_weights[bl,0] , " of the total ",\
num_active_flows, " remaining flows"
rem_cap = rem_cap - inc * link_weights
neg_cap = list(np.where(rem_cap < -1e7)[0]) # for each (aka only) column
if (len(neg_cap) > 0):
                print >> sys.stderr, "warning! in waterfilling hp links with neg. rem_cap ", neg_cap
bf = np.where(routes[:,bl] > 0)[0]
active_flows[bf] = 0
num_active_flows = np.count_nonzero(active_flows, axis=0)
#print(num_active_flows,"flows left")
weights[bf] = 0
self.levels[bl] = self.max_level
# get max. rate at each link
r = np.ones((self.num_links,1)) * float('inf')
for e in range(self.num_links):
flows = np.nonzero(routes[:, e])[0]
if len(flows) > 0:
sum_demands = sum(x[flows])[0]
cap = c[e,0]
diff = abs(sum_demands - cap)
if (sum_demands > cap or diff < eps):
r[e] = max(x[flows])
print "link",e,"has rate", r[e]
self.level = self.max_level
self.x = x
self.r = r
self.bottleneck_links_arr = np.where(self.r < float('inf'))[0]
self.bottleneck_links = {}
self.non_bottleneck_links = {}
self.sat_flows = {}
self.unsat_flows = {}
# class Eps:
# def __init__(self):
# self.eps1 = 1e-7
# pass
# def main():
# for num_flows in [10, 100, 1000, 10000]:
# start = time.time()
# routes = np.ones((num_flows, 2))
# routes[:, 1] = 0
# routes[0:2, 1] = 1
# routes[0, 0] = 0
# c = np.ones((2,1))
# wf = Waterfilling(routes, c, True, Eps())
# stop = time.time()
# elapsed = stop - start
# print("num_flows", num_flows, "elapsed", elapsed,"s")
# #print wf.x
# #print wf.r
# #print wf.level
# pass
# main()
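# Minimal worked example (added for illustration; not part of the original
# module). It assumes prec_library only needs an eps1 attribute, mirroring the
# commented-out benchmark above. Two flows share link 0 (capacity 1.0) and only
# flow 1 also crosses link 1 (capacity 10.0); link 0 is the bottleneck, so both
# flows get 0.5 and link 1 stays unsaturated (fair share inf).
#
# class _Eps(object):
#     eps1 = 1e-7
#
# routes = np.array([[1.0, 0.0],
#                    [1.0, 1.0]])   # rows = flows, columns = links
# c = np.array([[1.0], [10.0]])     # per-link capacities
# wf = Waterfilling(routes, c, False, _Eps())
# # wf.x -> approximately [[0.5], [0.5]]
# # wf.r -> approximately [[0.5], [inf]]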
|
normal
|
{
"blob_id": "93e534e8d425510b59310dcbfc5bca9cc32f245e",
"index": 9798,
"step-1": "import sys\nimport random\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\nclass Waterfilling:\n \"\"\"\n initializes x and r with optimal flow allocations\n and link fair share rates for traffic matrix routes and link\n capacities c, and level with number of levels\n after running the waterfilling algorithm. note\n that if sum of flow allocations at a link is less than capacity\n then fair share of link is float('inf').\n not that routes and c must be initialized before calling this.\n \"\"\" \n\n def __init__(self, routes, c, log, prec_library):\n #log = True\n #print \"Waterfilling\"\n #print mpmath.mp\n \n (self.num_flows, self.num_links) = routes.shape\n self.levels = np.ones((self.num_links, 1)) * float('inf')\n self.prec_library = prec_library\n \n eps = prec_library.eps1\n weights = np.ones((self.num_flows,1))\n #print(\"weights\", weights.shape, weights)\n #print(\"routes\", routes.shape, routes)\n #self.r = np.ones((self.num_links,1)) * mpf_inf\n #self.x = np.ones((self.num_flows,1)) * mpf_inf \n\n x = np.zeros((self.num_flows,1))\n active_flows = np.ones((self.num_flows, 1), dtype=bool)\n\n \n rem_cap = c #np.ones((self.num_links, 1)) * prec_library.mpf_one\n # for i in range(self.num_links):\n # rem_cap[i] = prec_library.mpf(c[i,0])\n\n\n self.max_level = 0\n num_active_flows = np.count_nonzero(active_flows, axis=0)\n #print(num_active_flows,\"flows left\")\n\n while num_active_flows > 0:\n \n # number of rem flows on all links\n link_weights = np.dot(routes.T, weights)\n assert(rem_cap.shape == link_weights.shape)\n try:\n fair_shares = np.where(link_weights>0, rem_cap/link_weights, float('inf'))\n except:\n pass\n #print(\"link_weights\", link_weights)\n #print(\"rem_cap\", rem_cap)\n #print(\"fair_shares\", fair_shares)\n fair_shares.reshape(self.num_links, 1)\n bl = np.argmin(fair_shares)\n #print (\"bl\",type(bl),bl)\n inc = float(fair_shares[bl, 0])\n assert(inc < float('inf'))\n\n # increase level, only when link with smallest fair share rate\n # has a rate larger than last one, handles the following example\n # two links, each cap 10.0, each has one flow, and none in common\n # each link identified in different iterations of this loop\n if self.max_level == 0 or inc > eps: self.max_level += 1\n x = np.where(active_flows, x + inc * weights, x)\n\n if log:\n print \"In round\",self.max_level,\\\n \" link\", bl, \"has smallest fair share\", inc, \"b/s\",\\\n \"Next rate increase is\", inc, \" (type \", type(inc), \") cuz of bl \",\\\n bl, \" with rem_cap \", rem_cap[bl,0], \" b/s\",\\\n \"and \", link_weights[bl,0] , \" of the total \",\\\n num_active_flows, \" remaining flows\"\n rem_cap = rem_cap - inc * link_weights\n neg_cap = list(np.where(rem_cap < -1e7)[0]) # for each (aka only) column \n if (len(neg_cap) > 0):\n print >> sys.stderr, \"warning! in watefilling hp links with neg. rem_cap \", neg_cap\n bf = np.where(routes[:,bl] > 0)[0]\n active_flows[bf] = 0\n num_active_flows = np.count_nonzero(active_flows, axis=0)\n #print(num_active_flows,\"flows left\")\n weights[bf] = 0\n self.levels[bl] = self.max_level\n \n # get max. 
rate at each link\n r = np.ones((self.num_links,1)) * float('inf')\n for e in range(self.num_links):\n flows = np.nonzero(routes[:, e])[0]\n if len(flows) > 0:\n sum_demands = sum(x[flows])[0]\n cap = c[e,0]\n diff = abs(sum_demands - cap)\n if (sum_demands > cap or diff < eps):\n r[e] = max(x[flows])\n print \"link\",e,\"has rate\", r[e]\n\n self.level = self.max_level\n self.x = x\n self.r = r\n\n self.bottleneck_links_arr = np.where(self.r < float('inf'))[0]\n self.bottleneck_links = {}\n self.non_bottleneck_links = {}\n\n self.sat_flows = {}\n self.unsat_flows = {}\n\n# class Eps:\n# def __init__(self):\n# self.eps1 = 1e-7\n# pass\n\n# def main():\n# for num_flows in [10, 100, 1000, 10000]:\n# start = time.time()\n# routes = np.ones((num_flows, 2))\n# routes[:, 1] = 0\n# routes[0:2, 1] = 1\n# routes[0, 0] = 0\n# c = np.ones((2,1))\n \n# wf = Waterfilling(routes, c, True, Eps())\n# stop = time.time()\n# elapsed = stop - start\n# print(\"num_flows\", num_flows, \"elapsed\", elapsed,\"s\")\n# #print wf.x\n# #print wf.r\n# #print wf.level\n# pass\n\n# main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Node:
<|reserved_special_token_0|>
def __init__(self, k: int=None, loc: tuple=None, **kwargs):
"""
        Each node contains a few fields:
key: node_id.
location: node's position represent as 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) ->int:
"""
Return this node key.
:return: key
"""
return self.__key
<|reserved_special_token_0|>
def set_location(self, location: tuple) ->None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)
}
l_list.append(m_dict)
return l_list
<|reserved_special_token_0|>
def __str__(self) ->str:
return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.
__ni_out)
def __eq__(self, o: object) ->bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.
__location) and self.__ni_in.__eq__(other.__ni_in
) and self.__ni_out.__eq__(other.__ni_out)
<|reserved_special_token_1|>
class Node:
<|reserved_special_token_0|>
def __init__(self, k: int=None, loc: tuple=None, **kwargs):
"""
        Each node contains a few fields:
key: node_id.
location: node's position represent as 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) ->int:
"""
Return this node key.
:return: key
"""
return self.__key
<|reserved_special_token_0|>
def set_location(self, location: tuple) ->None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)
}
l_list.append(m_dict)
return l_list
def __repr__(self):
return str([self.get_key()])
def __str__(self) ->str:
return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.
__ni_out)
def __eq__(self, o: object) ->bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.
__location) and self.__ni_in.__eq__(other.__ni_in
) and self.__ni_out.__eq__(other.__ni_out)
<|reserved_special_token_1|>
class Node:
<|reserved_special_token_0|>
def __init__(self, k: int=None, loc: tuple=None, **kwargs):
"""
        Each node contains a few fields:
key: node_id.
location: node's position represent as 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) ->int:
"""
Return this node key.
:return: key
"""
return self.__key
def get_location(self) ->tuple:
"""
Return this node location as a 3DPoint (x, y, z).
:return: this node location
"""
return self.__location
def set_location(self, location: tuple) ->None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)
}
l_list.append(m_dict)
return l_list
def __repr__(self):
return str([self.get_key()])
def __str__(self) ->str:
return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.
__ni_out)
def __eq__(self, o: object) ->bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.
__location) and self.__ni_in.__eq__(other.__ni_in
) and self.__ni_out.__eq__(other.__ni_out)
<|reserved_special_token_1|>
class Node:
"""
This class represent a node (vertex).
"""
def __init__(self, k: int=None, loc: tuple=None, **kwargs):
"""
        Each node contains a few fields:
key: node_id.
location: node's position represent as 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) ->int:
"""
Return this node key.
:return: key
"""
return self.__key
def get_location(self) ->tuple:
"""
Return this node location as a 3DPoint (x, y, z).
:return: this node location
"""
return self.__location
def set_location(self, location: tuple) ->None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)
}
l_list.append(m_dict)
return l_list
def __repr__(self):
return str([self.get_key()])
def __str__(self) ->str:
return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.
__ni_out)
def __eq__(self, o: object) ->bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.
__location) and self.__ni_in.__eq__(other.__ni_in
) and self.__ni_out.__eq__(other.__ni_out)
<|reserved_special_token_1|>
class Node:
"""
This class represent a node (vertex).
"""
def __init__(self, k: int = None, loc: tuple = None, **kwargs):
"""
        Each node contains a few fields:
key: node_id.
location: node's position represent as 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) -> None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) -> None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) -> dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) -> dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) -> int:
"""
Return this node key.
:return: key
"""
return self.__key
def get_location(self) -> tuple:
"""
Return this node location as a 3DPoint (x, y, z).
:return: this node location
"""
return self.__location
def set_location(self, location: tuple) -> None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {"pos": loc_as_str[1:-1], "id": self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {"src": int(self.get_key()), "w": float(v), "dest": int(k)}
l_list.append(m_dict)
return l_list
def __repr__(self):
return str([self.get_key()])
def __str__(self) -> str:
return "Node: id: " + str(self.__key) + ' neighbors: ' + str(self.__ni_out)
def __eq__(self, o: object) -> bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.__location) and self.__ni_in.__eq__(
other.__ni_in) and self.__ni_out.__eq__(other.__ni_out)
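# Small usage sketch (added for illustration; not part of the stored sample):
# two nodes joined by one weighted directed edge, then serialized.
#
# n1 = Node(1, (0.0, 0.0, 0.0))
# n2 = Node(2, (1.0, 2.0, 0.0))
# n1.add_neighbor_out(n2.get_key(), 1.5)   # edge 1 ---> 2, weight 1.5
# n2.add_neighbor_in(n1.get_key(), 1.5)    # mirror entry on the destination
# # n1.as_dict_node() -> {'pos': '0.0, 0.0, 0.0', 'id': 1}
# # n1.as_dict_edge() -> [{'src': 1, 'w': 1.5, 'dest': 2}]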
|
flexible
|
{
"blob_id": "9c3f6c368c764918da5cce44da574b7c041fa414",
"index": 1364,
"step-1": "class Node:\n <mask token>\n\n def __init__(self, k: int=None, loc: tuple=None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) ->int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n <mask token>\n\n def set_location(self, location: tuple) ->None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)\n }\n l_list.append(m_dict)\n return l_list\n <mask token>\n\n def __str__(self) ->str:\n return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.\n __ni_out)\n\n def __eq__(self, o: object) ->bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.\n __location) and self.__ni_in.__eq__(other.__ni_in\n ) and self.__ni_out.__eq__(other.__ni_out)\n",
"step-2": "class Node:\n <mask token>\n\n def __init__(self, k: int=None, loc: tuple=None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) ->int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n <mask token>\n\n def set_location(self, location: tuple) ->None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)\n }\n l_list.append(m_dict)\n return l_list\n\n def __repr__(self):\n return str([self.get_key()])\n\n def __str__(self) ->str:\n return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.\n __ni_out)\n\n def __eq__(self, o: object) ->bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.\n __location) and self.__ni_in.__eq__(other.__ni_in\n ) and self.__ni_out.__eq__(other.__ni_out)\n",
"step-3": "class Node:\n <mask token>\n\n def __init__(self, k: int=None, loc: tuple=None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) ->int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n\n def get_location(self) ->tuple:\n \"\"\"\n Return this node location as a 3DPoint (x, y, z).\n :return: this node location\n \"\"\"\n return self.__location\n\n def set_location(self, location: tuple) ->None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)\n }\n l_list.append(m_dict)\n return l_list\n\n def __repr__(self):\n return str([self.get_key()])\n\n def __str__(self) ->str:\n return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.\n __ni_out)\n\n def __eq__(self, o: object) ->bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.\n __location) and self.__ni_in.__eq__(other.__ni_in\n ) and self.__ni_out.__eq__(other.__ni_out)\n",
"step-4": "class Node:\n \"\"\"\n This class represent a node (vertex).\n \"\"\"\n\n def __init__(self, k: int=None, loc: tuple=None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) ->int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n\n def get_location(self) ->tuple:\n \"\"\"\n Return this node location as a 3DPoint (x, y, z).\n :return: this node location\n \"\"\"\n return self.__location\n\n def set_location(self, location: tuple) ->None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)\n }\n l_list.append(m_dict)\n return l_list\n\n def __repr__(self):\n return str([self.get_key()])\n\n def __str__(self) ->str:\n return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.\n __ni_out)\n\n def __eq__(self, o: object) ->bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.\n __location) and self.__ni_in.__eq__(other.__ni_in\n ) and self.__ni_out.__eq__(other.__ni_out)\n",
"step-5": "class Node:\n \"\"\"\n This class represent a node (vertex).\n \"\"\"\n\n def __init__(self, k: int = None, loc: tuple = None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) -> None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) -> None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) -> dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) -> dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) -> int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n\n def get_location(self) -> tuple:\n \"\"\"\n Return this node location as a 3DPoint (x, y, z).\n :return: this node location\n \"\"\"\n return self.__location\n\n def set_location(self, location: tuple) -> None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {\"pos\": loc_as_str[1:-1], \"id\": self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {\"src\": int(self.get_key()), \"w\": float(v), \"dest\": int(k)}\n l_list.append(m_dict)\n return l_list\n\n def __repr__(self):\n return str([self.get_key()])\n\n def __str__(self) -> str:\n return \"Node: id: \" + str(self.__key) + ' neighbors: ' + str(self.__ni_out)\n\n def __eq__(self, o: object) -> bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.__location) and self.__ni_in.__eq__(\n other.__ni_in) and self.__ni_out.__eq__(other.__ni_out)",
"step-ids": [
12,
13,
14,
15,
16
]
}
|
[
12,
13,
14,
15,
16
] |
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
import hashlib
from xml.sax.saxutils import escape
from struct import unpack, pack
import textwrap
import json
from .anconf import warning, error, CONF, enable_colors, remove_colors, save_colors, color_range
def disable_print_colors():
colors = save_colors()
remove_colors()
return colors
def enable_print_colors(colors):
enable_colors(colors)
# Handle exit message
def Exit(msg):
warning("Error : " + msg)
raise ("oops")
def Warning(msg):
warning(msg)
def _PrintBanner():
print_fct = CONF["PRINT_FCT"]
print_fct("*" * 75 + "\n")
def _PrintSubBanner(title=None):
print_fct = CONF["PRINT_FCT"]
if title == None:
print_fct("#" * 20 + "\n")
else:
print_fct("#" * 10 + " " + title + "\n")
def _PrintNote(note, tab=0):
print_fct = CONF["PRINT_FCT"]
note_color = CONF["COLORS"]["NOTE"]
normal_color = CONF["COLORS"]["NORMAL"]
print_fct("\t" * tab + "%s# %s%s" % (note_color, note, normal_color) + "\n")
# Print arg into a correct format
def _Print(name, arg):
buff = name + " "
if type(arg).__name__ == 'int':
buff += "0x%x" % arg
elif type(arg).__name__ == 'long':
buff += "0x%x" % arg
elif type(arg).__name__ == 'str':
buff += "%s" % arg
elif isinstance(arg, SV):
buff += "0x%x" % arg.get_value()
elif isinstance(arg, SVs):
buff += arg.get_value().__str__()
print(buff)
def PrettyShowEx(exceptions):
if len(exceptions) > 0:
CONF["PRINT_FCT"]("Exceptions:\n")
for i in exceptions:
CONF["PRINT_FCT"]("\t%s%s%s\n" %
(CONF["COLORS"]["EXCEPTION"], i.show_buff(),
CONF["COLORS"]["NORMAL"]))
def _PrintXRef(tag, items):
print_fct = CONF["PRINT_FCT"]
for i in items:
print_fct("%s: %s %s %s %s\n" %
(tag, i[0].get_class_name(), i[0].get_name(),
i[0].get_descriptor(), ' '.join("%x" % j.get_idx()
for j in i[1])))
def _PrintDRef(tag, items):
print_fct = CONF["PRINT_FCT"]
for i in items:
print_fct("%s: %s %s %s %s\n" %
(tag, i[0].get_class_name(), i[0].get_name(),
i[0].get_descriptor(), ' '.join("%x" % j for j in i[1])))
def _PrintDefault(msg):
print_fct = CONF["PRINT_FCT"]
print_fct(msg)
def PrettyShow(m_a, basic_blocks, notes={}):
idx = 0
nb = 0
offset_color = CONF["COLORS"]["OFFSET"]
offset_addr_color = CONF["COLORS"]["OFFSET_ADDR"]
instruction_name_color = CONF["COLORS"]["INSTRUCTION_NAME"]
branch_false_color = CONF["COLORS"]["BRANCH_FALSE"]
branch_true_color = CONF["COLORS"]["BRANCH_TRUE"]
branch_color = CONF["COLORS"]["BRANCH"]
exception_color = CONF["COLORS"]["EXCEPTION"]
bb_color = CONF["COLORS"]["BB"]
normal_color = CONF["COLORS"]["NORMAL"]
print_fct = CONF["PRINT_FCT"]
colors = CONF["COLORS"]["OUTPUT"]
for i in basic_blocks:
print_fct("%s%s%s : \n" % (bb_color, i.get_name(), normal_color))
instructions = i.get_instructions()
for ins in instructions:
if nb in notes:
for note in notes[nb]:
_PrintNote(note, 1)
print_fct("\t%s%-3d%s(%s%08x%s) " %
(offset_color, nb, normal_color, offset_addr_color, idx,
normal_color))
print_fct("%s%-20s%s" %
(instruction_name_color, ins.get_name(), normal_color))
operands = ins.get_operands()
print_fct(
"%s" %
", ".join(m_a.get_vm().colorize_operands(operands, colors)))
op_value = ins.get_op_value()
if ins == instructions[-1] and i.childs:
print_fct(" ")
# packed/sparse-switch
if (op_value == 0x2b or op_value == 0x2c) and len(i.childs) > 1:
values = i.get_special_ins(idx).get_values()
print_fct("%s[ D:%s%s " %
(branch_false_color, i.childs[0][2].get_name(),
branch_color))
print_fct(' '.join("%d:%s" % (
values[j], i.childs[j + 1][2].get_name()) for j in
range(0, len(i.childs) - 1)) + " ]%s" %
normal_color)
else:
if len(i.childs) == 2:
print_fct("%s[ %s%s " % (branch_false_color,
i.childs[0][2].get_name(),
branch_true_color))
print_fct(' '.join("%s" % c[2].get_name(
) for c in i.childs[1:]) + " ]%s" % normal_color)
else:
print_fct("%s[ " % branch_color + ' '.join(
"%s" % c[2].get_name() for c in i.childs) + " ]%s" %
normal_color)
idx += ins.get_length()
nb += 1
print_fct("\n")
if i.get_exception_analysis():
print_fct("\t%s%s%s\n" %
(exception_color, i.exception_analysis.show_buff(),
normal_color))
print_fct("\n")
class TmpBlock(object):
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
def method2json(mx, directed_graph=False):
if directed_graph:
return method2json_direct(mx)
return method2json_undirect(mx)
def method2json_undirect(mx):
d = {}
reports = []
d["reports"] = reports
for DVMBasicMethodBlock in mx.basic_blocks.gets():
cblock = {}
cblock["BasicBlockId"] = DVMBasicMethodBlock.get_name()
cblock["registers"] = mx.get_method().get_code().get_registers_size()
cblock["instructions"] = []
ins_idx = DVMBasicMethodBlock.start
for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(
):
c_ins = {}
c_ins["idx"] = ins_idx
c_ins["name"] = DVMBasicMethodBlockInstruction.get_name()
c_ins["operands"] = DVMBasicMethodBlockInstruction.get_operands(
ins_idx)
cblock["instructions"].append(c_ins)
ins_idx += DVMBasicMethodBlockInstruction.get_length()
cblock["Edge"] = []
for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:
cblock["Edge"].append(DVMBasicMethodBlockChild[-1].get_name())
reports.append(cblock)
return json.dumps(d)
def method2json_direct(mx):
d = {}
reports = []
d["reports"] = reports
hooks = {}
l = []
for DVMBasicMethodBlock in mx.basic_blocks.gets():
for index, DVMBasicMethodBlockChild in enumerate(
DVMBasicMethodBlock.childs):
if DVMBasicMethodBlock.get_name(
) == DVMBasicMethodBlockChild[-1].get_name():
preblock = TmpBlock(DVMBasicMethodBlock.get_name() + "-pre")
cnblock = {}
cnblock["BasicBlockId"] = DVMBasicMethodBlock.get_name(
) + "-pre"
cnblock["start"] = DVMBasicMethodBlock.start
cnblock["notes"] = []
cnblock["Edge"] = [DVMBasicMethodBlock.get_name()]
cnblock["registers"] = 0
cnblock["instructions"] = []
cnblock["info_bb"] = 0
l.append(cnblock)
for parent in DVMBasicMethodBlock.fathers:
hooks[parent[-1].get_name()] = []
hooks[parent[-1].get_name()].append(preblock)
for idx, child in enumerate(parent[-1].childs):
if child[-1].get_name() == DVMBasicMethodBlock.get_name(
):
hooks[parent[-1].get_name()].append(child[-1])
for DVMBasicMethodBlock in mx.basic_blocks.gets():
cblock = {}
cblock["BasicBlockId"] = DVMBasicMethodBlock.get_name()
cblock["start"] = DVMBasicMethodBlock.start
cblock["notes"] = DVMBasicMethodBlock.get_notes()
cblock["registers"] = mx.get_method().get_code().get_registers_size()
cblock["instructions"] = []
ins_idx = DVMBasicMethodBlock.start
last_instru = None
for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(
):
c_ins = {}
c_ins["idx"] = ins_idx
c_ins["name"] = DVMBasicMethodBlockInstruction.get_name()
c_ins["operands"] = DVMBasicMethodBlockInstruction.get_operands(
ins_idx)
c_ins["formatted_operands"
] = DVMBasicMethodBlockInstruction.get_formatted_operands()
cblock["instructions"].append(c_ins)
if (DVMBasicMethodBlockInstruction.get_op_value() == 0x2b or
DVMBasicMethodBlockInstruction.get_op_value() == 0x2c):
values = DVMBasicMethodBlock.get_special_ins(ins_idx)
cblock["info_next"] = values.get_values()
ins_idx += DVMBasicMethodBlockInstruction.get_length()
last_instru = DVMBasicMethodBlockInstruction
cblock["info_bb"] = 0
if DVMBasicMethodBlock.childs:
if len(DVMBasicMethodBlock.childs) > 1:
cblock["info_bb"] = 1
if (last_instru.get_op_value() == 0x2b or
last_instru.get_op_value() == 0x2c):
cblock["info_bb"] = 2
cblock["Edge"] = []
for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:
ok = False
if DVMBasicMethodBlock.get_name() in hooks:
if DVMBasicMethodBlockChild[-1] in hooks[
DVMBasicMethodBlock.get_name()
]:
ok = True
cblock["Edge"].append(hooks[DVMBasicMethodBlock.get_name(
)][0].get_name())
if not ok:
cblock["Edge"].append(DVMBasicMethodBlockChild[-1].get_name())
exception_analysis = DVMBasicMethodBlock.get_exception_analysis()
if exception_analysis:
cblock["Exceptions"] = exception_analysis.get()
reports.append(cblock)
reports.extend(l)
return json.dumps(d)
class SV(object):
def __init__(self, size, buff):
self.__size = size
self.__value = unpack(self.__size, buff)[0]
def _get(self):
return pack(self.__size, self.__value)
def __str__(self):
return "0x%x" % self.__value
def __int__(self):
return self.__value
def get_value_buff(self):
return self._get()
def get_value(self):
return self.__value
def set_value(self, attr):
self.__value = attr
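# Illustrative note (not in the original source): SV wraps a single value read
# with struct.unpack, e.g.
#   sv = SV('<L', pack('<L', 42))
#   sv.get_value()        # 42
#   sv.set_value(43)
#   sv.get_value_buff()   # pack('<L', 43)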
class SVs(object):
def __init__(self, size, ntuple, buff):
self.__size = size
self.__value = ntuple._make(unpack(self.__size, buff))
def _get(self):
l = []
for i in self.__value._fields:
l.append(getattr(self.__value, i))
return pack(self.__size, *l)
def _export(self):
return [x for x in self.__value._fields]
def get_value_buff(self):
return self._get()
def get_value(self):
return self.__value
def set_value(self, attr):
self.__value = self.__value._replace(**attr)
def __str__(self):
return self.__value.__str__()
def object_to_bytes(obj):
"""
    Convert an object to a bytearray or call get_raw() of the object
if no useful type was found.
"""
if isinstance(obj, str):
return bytearray(obj, "UTF-8")
elif isinstance(obj, bool):
return bytearray()
elif isinstance(obj, int):
return pack("<L", obj)
elif obj == None:
return bytearray()
elif isinstance(obj, bytearray):
return obj
else:
#print type(obj), obj
return obj.get_raw()
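# Expected behaviour, for illustration only (derived from the branches above):
#   object_to_bytes("abc")  -> bytearray(b'abc')
#   object_to_bytes(5)      -> pack("<L", 5), i.e. b'\x05\x00\x00\x00'
#   object_to_bytes(True) and object_to_bytes(None) -> bytearray()
#   anything else is expected to provide a get_raw() method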
class MethodBC(object):
def show(self, value):
getattr(self, "show_" + value)()
class BuffHandle(object):
def __init__(self, buff):
self.__buff = bytearray(buff)
self.__idx = 0
def size(self):
return len(self.__buff)
def set_idx(self, idx):
self.__idx = idx
def get_idx(self):
return self.__idx
def readNullString(self, size):
data = self.read(size)
return data
def read_b(self, size):
return self.__buff[self.__idx:self.__idx + size]
def read_at(self, offset, size):
return self.__buff[offset:offset + size]
def read(self, size):
if isinstance(size, SV):
size = size.value
buff = self.__buff[self.__idx:self.__idx + size]
self.__idx += size
return buff
def end(self):
return self.__idx == len(self.__buff)
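# Illustrative sketch (not part of the original file): BuffHandle keeps a read
# cursor, so successive read() calls walk through the buffer while read_b()
# only peeks.
#   bh = BuffHandle(b"\x01\x02\x03\x04")
#   bh.read(2)     # bytearray(b'\x01\x02'), cursor now at 2
#   bh.read_b(2)   # bytearray(b'\x03\x04'), cursor unchanged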
class Buff(object):
def __init__(self, offset, buff):
self.offset = offset
self.buff = buff
self.size = len(buff)
class _Bytecode(object):
def __init__(self, buff):
self.__buff = bytearray(buff)
self.__idx = 0
def read(self, size):
if isinstance(size, SV):
size = size.value
buff = self.__buff[self.__idx:self.__idx + size]
self.__idx += size
return buff
def readat(self, off):
if isinstance(off, SV):
off = off.value
return self.__buff[off:]
def read_b(self, size):
return self.__buff[self.__idx:self.__idx + size]
def set_idx(self, idx):
self.__idx = idx
def get_idx(self):
return self.__idx
def add_idx(self, idx):
self.__idx += idx
def register(self, type_register, fct):
self.__registers[type_register].append(fct)
def get_buff(self):
return self.__buff
def length_buff(self):
return len(self.__buff)
def set_buff(self, buff):
self.__buff = buff
def save(self, filename):
buff = self._save()
with open(filename, "wb") as fd:
fd.write(buff)
def FormatClassToJava(input):
"""
    Transform a typical xml format class into java format
:param input: the input class name
:rtype: string
"""
return "L" + input.replace(".", "/") + ";"
def FormatClassToPython(input):
i = input[:-1]
i = i.replace("/", "_")
i = i.replace("$", "_")
return i
def FormatNameToPython(input):
i = input.replace("<", "")
i = i.replace(">", "")
i = i.replace("$", "_")
return i
def FormatDescriptorToPython(input):
i = input.replace("/", "_")
i = i.replace(";", "")
i = i.replace("[", "")
i = i.replace("(", "")
i = i.replace(")", "")
i = i.replace(" ", "")
i = i.replace("$", "")
return i
class Node(object):
def __init__(self, n, s):
self.id = n
self.title = s
self.children = []
|
normal
|
{
"blob_id": "2e6f04c3ff3e47a2c3e9f6a7d93e7ce2955a2756",
"index": 8354,
"step-1": "<mask token>\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n <mask token>\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n <mask token>\n\n\n<mask token>\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, 'show_' + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, 'wb') as fd:\n fd.write(buff)\n\n\n<mask token>\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n self.title = s\n self.children = []\n",
"step-2": "<mask token>\n\n\ndef disable_print_colors():\n colors = save_colors()\n remove_colors()\n return colors\n\n\n<mask token>\n\n\ndef Warning(msg):\n warning(msg)\n\n\ndef _PrintBanner():\n print_fct = CONF['PRINT_FCT']\n print_fct('*' * 75 + '\\n')\n\n\n<mask token>\n\n\ndef _PrintNote(note, tab=0):\n print_fct = CONF['PRINT_FCT']\n note_color = CONF['COLORS']['NOTE']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct('\\t' * tab + '%s# %s%s' % (note_color, note, normal_color) + '\\n'\n )\n\n\n<mask token>\n\n\ndef _PrintXRef(tag, items):\n print_fct = CONF['PRINT_FCT']\n for i in items:\n print_fct('%s: %s %s %s %s\\n' % (tag, i[0].get_class_name(), i[0].\n get_name(), i[0].get_descriptor(), ' '.join('%x' % j.get_idx() for\n j in i[1])))\n\n\n<mask token>\n\n\ndef _PrintDefault(msg):\n print_fct = CONF['PRINT_FCT']\n print_fct(msg)\n\n\ndef PrettyShow(m_a, basic_blocks, notes={}):\n idx = 0\n nb = 0\n offset_color = CONF['COLORS']['OFFSET']\n offset_addr_color = CONF['COLORS']['OFFSET_ADDR']\n instruction_name_color = CONF['COLORS']['INSTRUCTION_NAME']\n branch_false_color = CONF['COLORS']['BRANCH_FALSE']\n branch_true_color = CONF['COLORS']['BRANCH_TRUE']\n branch_color = CONF['COLORS']['BRANCH']\n exception_color = CONF['COLORS']['EXCEPTION']\n bb_color = CONF['COLORS']['BB']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct = CONF['PRINT_FCT']\n colors = CONF['COLORS']['OUTPUT']\n for i in basic_blocks:\n print_fct('%s%s%s : \\n' % (bb_color, i.get_name(), normal_color))\n instructions = i.get_instructions()\n for ins in instructions:\n if nb in notes:\n for note in notes[nb]:\n _PrintNote(note, 1)\n print_fct('\\t%s%-3d%s(%s%08x%s) ' % (offset_color, nb,\n normal_color, offset_addr_color, idx, normal_color))\n print_fct('%s%-20s%s' % (instruction_name_color, ins.get_name(),\n normal_color))\n operands = ins.get_operands()\n print_fct('%s' % ', '.join(m_a.get_vm().colorize_operands(\n operands, colors)))\n op_value = ins.get_op_value()\n if ins == instructions[-1] and i.childs:\n print_fct(' ')\n if (op_value == 43 or op_value == 44) and len(i.childs) > 1:\n values = i.get_special_ins(idx).get_values()\n print_fct('%s[ D:%s%s ' % (branch_false_color, i.childs\n [0][2].get_name(), branch_color))\n print_fct(' '.join('%d:%s' % (values[j], i.childs[j + 1\n ][2].get_name()) for j in range(0, len(i.childs) - \n 1)) + ' ]%s' % normal_color)\n elif len(i.childs) == 2:\n print_fct('%s[ %s%s ' % (branch_false_color, i.childs[0\n ][2].get_name(), branch_true_color))\n print_fct(' '.join('%s' % c[2].get_name() for c in i.\n childs[1:]) + ' ]%s' % normal_color)\n else:\n print_fct('%s[ ' % branch_color + ' '.join('%s' % c[2].\n get_name() for c in i.childs) + ' ]%s' % normal_color)\n idx += ins.get_length()\n nb += 1\n print_fct('\\n')\n if i.get_exception_analysis():\n print_fct('\\t%s%s%s\\n' % (exception_color, i.exception_analysis\n .show_buff(), normal_color))\n print_fct('\\n')\n\n\nclass TmpBlock(object):\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\ndef method2json(mx, directed_graph=False):\n if directed_graph:\n return method2json_direct(mx)\n return method2json_undirect(mx)\n\n\ndef method2json_undirect(mx):\n d = {}\n reports = []\n d['reports'] = reports\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n 
for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n cblock['instructions'].append(c_ins)\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n reports.append(cblock)\n return json.dumps(d)\n\n\ndef method2json_direct(mx):\n d = {}\n reports = []\n d['reports'] = reports\n hooks = {}\n l = []\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n for index, DVMBasicMethodBlockChild in enumerate(DVMBasicMethodBlock\n .childs):\n if DVMBasicMethodBlock.get_name() == DVMBasicMethodBlockChild[-1\n ].get_name():\n preblock = TmpBlock(DVMBasicMethodBlock.get_name() + '-pre')\n cnblock = {}\n cnblock['BasicBlockId'] = DVMBasicMethodBlock.get_name(\n ) + '-pre'\n cnblock['start'] = DVMBasicMethodBlock.start\n cnblock['notes'] = []\n cnblock['Edge'] = [DVMBasicMethodBlock.get_name()]\n cnblock['registers'] = 0\n cnblock['instructions'] = []\n cnblock['info_bb'] = 0\n l.append(cnblock)\n for parent in DVMBasicMethodBlock.fathers:\n hooks[parent[-1].get_name()] = []\n hooks[parent[-1].get_name()].append(preblock)\n for idx, child in enumerate(parent[-1].childs):\n if child[-1].get_name(\n ) == DVMBasicMethodBlock.get_name():\n hooks[parent[-1].get_name()].append(child[-1])\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['start'] = DVMBasicMethodBlock.start\n cblock['notes'] = DVMBasicMethodBlock.get_notes()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n last_instru = None\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n c_ins['formatted_operands'\n ] = DVMBasicMethodBlockInstruction.get_formatted_operands()\n cblock['instructions'].append(c_ins)\n if DVMBasicMethodBlockInstruction.get_op_value(\n ) == 43 or DVMBasicMethodBlockInstruction.get_op_value() == 44:\n values = DVMBasicMethodBlock.get_special_ins(ins_idx)\n cblock['info_next'] = values.get_values()\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n last_instru = DVMBasicMethodBlockInstruction\n cblock['info_bb'] = 0\n if DVMBasicMethodBlock.childs:\n if len(DVMBasicMethodBlock.childs) > 1:\n cblock['info_bb'] = 1\n if last_instru.get_op_value() == 43 or last_instru.get_op_value(\n ) == 44:\n cblock['info_bb'] = 2\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n ok = False\n if DVMBasicMethodBlock.get_name() in hooks:\n if DVMBasicMethodBlockChild[-1] in hooks[DVMBasicMethodBlock\n .get_name()]:\n ok = True\n cblock['Edge'].append(hooks[DVMBasicMethodBlock.\n get_name()][0].get_name())\n if not ok:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n exception_analysis = DVMBasicMethodBlock.get_exception_analysis()\n if exception_analysis:\n cblock['Exceptions'] = exception_analysis.get()\n reports.append(cblock)\n reports.extend(l)\n return json.dumps(d)\n\n\nclass SV(object):\n\n def __init__(self, size, buff):\n self.__size = size\n 
self.__value = unpack(self.__size, buff)[0]\n\n def _get(self):\n return pack(self.__size, self.__value)\n\n def __str__(self):\n return '0x%x' % self.__value\n\n def __int__(self):\n return self.__value\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = attr\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n\n def _export(self):\n return [x for x in self.__value._fields]\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n\n def __str__(self):\n return self.__value.__str__()\n\n\n<mask token>\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, 'show_' + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, 'wb') as fd:\n fd.write(buff)\n\n\ndef FormatClassToJava(input):\n \"\"\"\n Transoform a typical xml format class into java format\n\n :param input: the input class name\n :rtype: string\n \"\"\"\n return 'L' + input.replace('.', '/') + ';'\n\n\ndef FormatClassToPython(input):\n i = input[:-1]\n i = i.replace('/', '_')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatNameToPython(input):\n i = input.replace('<', '')\n i = i.replace('>', '')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatDescriptorToPython(input):\n i = input.replace('/', '_')\n i = i.replace(';', '')\n i = i.replace('[', '')\n i = i.replace('(', '')\n i = i.replace(')', '')\n i = i.replace(' ', '')\n i = i.replace('$', '')\n return i\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n 
self.title = s\n self.children = []\n",
"step-3": "<mask token>\n\n\ndef disable_print_colors():\n colors = save_colors()\n remove_colors()\n return colors\n\n\ndef enable_print_colors(colors):\n enable_colors(colors)\n\n\n<mask token>\n\n\ndef Warning(msg):\n warning(msg)\n\n\ndef _PrintBanner():\n print_fct = CONF['PRINT_FCT']\n print_fct('*' * 75 + '\\n')\n\n\n<mask token>\n\n\ndef _PrintNote(note, tab=0):\n print_fct = CONF['PRINT_FCT']\n note_color = CONF['COLORS']['NOTE']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct('\\t' * tab + '%s# %s%s' % (note_color, note, normal_color) + '\\n'\n )\n\n\n<mask token>\n\n\ndef _PrintXRef(tag, items):\n print_fct = CONF['PRINT_FCT']\n for i in items:\n print_fct('%s: %s %s %s %s\\n' % (tag, i[0].get_class_name(), i[0].\n get_name(), i[0].get_descriptor(), ' '.join('%x' % j.get_idx() for\n j in i[1])))\n\n\n<mask token>\n\n\ndef _PrintDefault(msg):\n print_fct = CONF['PRINT_FCT']\n print_fct(msg)\n\n\ndef PrettyShow(m_a, basic_blocks, notes={}):\n idx = 0\n nb = 0\n offset_color = CONF['COLORS']['OFFSET']\n offset_addr_color = CONF['COLORS']['OFFSET_ADDR']\n instruction_name_color = CONF['COLORS']['INSTRUCTION_NAME']\n branch_false_color = CONF['COLORS']['BRANCH_FALSE']\n branch_true_color = CONF['COLORS']['BRANCH_TRUE']\n branch_color = CONF['COLORS']['BRANCH']\n exception_color = CONF['COLORS']['EXCEPTION']\n bb_color = CONF['COLORS']['BB']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct = CONF['PRINT_FCT']\n colors = CONF['COLORS']['OUTPUT']\n for i in basic_blocks:\n print_fct('%s%s%s : \\n' % (bb_color, i.get_name(), normal_color))\n instructions = i.get_instructions()\n for ins in instructions:\n if nb in notes:\n for note in notes[nb]:\n _PrintNote(note, 1)\n print_fct('\\t%s%-3d%s(%s%08x%s) ' % (offset_color, nb,\n normal_color, offset_addr_color, idx, normal_color))\n print_fct('%s%-20s%s' % (instruction_name_color, ins.get_name(),\n normal_color))\n operands = ins.get_operands()\n print_fct('%s' % ', '.join(m_a.get_vm().colorize_operands(\n operands, colors)))\n op_value = ins.get_op_value()\n if ins == instructions[-1] and i.childs:\n print_fct(' ')\n if (op_value == 43 or op_value == 44) and len(i.childs) > 1:\n values = i.get_special_ins(idx).get_values()\n print_fct('%s[ D:%s%s ' % (branch_false_color, i.childs\n [0][2].get_name(), branch_color))\n print_fct(' '.join('%d:%s' % (values[j], i.childs[j + 1\n ][2].get_name()) for j in range(0, len(i.childs) - \n 1)) + ' ]%s' % normal_color)\n elif len(i.childs) == 2:\n print_fct('%s[ %s%s ' % (branch_false_color, i.childs[0\n ][2].get_name(), branch_true_color))\n print_fct(' '.join('%s' % c[2].get_name() for c in i.\n childs[1:]) + ' ]%s' % normal_color)\n else:\n print_fct('%s[ ' % branch_color + ' '.join('%s' % c[2].\n get_name() for c in i.childs) + ' ]%s' % normal_color)\n idx += ins.get_length()\n nb += 1\n print_fct('\\n')\n if i.get_exception_analysis():\n print_fct('\\t%s%s%s\\n' % (exception_color, i.exception_analysis\n .show_buff(), normal_color))\n print_fct('\\n')\n\n\nclass TmpBlock(object):\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\ndef method2json(mx, directed_graph=False):\n if directed_graph:\n return method2json_direct(mx)\n return method2json_undirect(mx)\n\n\ndef method2json_undirect(mx):\n d = {}\n reports = []\n d['reports'] = reports\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n 
cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n cblock['instructions'].append(c_ins)\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n reports.append(cblock)\n return json.dumps(d)\n\n\ndef method2json_direct(mx):\n d = {}\n reports = []\n d['reports'] = reports\n hooks = {}\n l = []\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n for index, DVMBasicMethodBlockChild in enumerate(DVMBasicMethodBlock\n .childs):\n if DVMBasicMethodBlock.get_name() == DVMBasicMethodBlockChild[-1\n ].get_name():\n preblock = TmpBlock(DVMBasicMethodBlock.get_name() + '-pre')\n cnblock = {}\n cnblock['BasicBlockId'] = DVMBasicMethodBlock.get_name(\n ) + '-pre'\n cnblock['start'] = DVMBasicMethodBlock.start\n cnblock['notes'] = []\n cnblock['Edge'] = [DVMBasicMethodBlock.get_name()]\n cnblock['registers'] = 0\n cnblock['instructions'] = []\n cnblock['info_bb'] = 0\n l.append(cnblock)\n for parent in DVMBasicMethodBlock.fathers:\n hooks[parent[-1].get_name()] = []\n hooks[parent[-1].get_name()].append(preblock)\n for idx, child in enumerate(parent[-1].childs):\n if child[-1].get_name(\n ) == DVMBasicMethodBlock.get_name():\n hooks[parent[-1].get_name()].append(child[-1])\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['start'] = DVMBasicMethodBlock.start\n cblock['notes'] = DVMBasicMethodBlock.get_notes()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n last_instru = None\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n c_ins['formatted_operands'\n ] = DVMBasicMethodBlockInstruction.get_formatted_operands()\n cblock['instructions'].append(c_ins)\n if DVMBasicMethodBlockInstruction.get_op_value(\n ) == 43 or DVMBasicMethodBlockInstruction.get_op_value() == 44:\n values = DVMBasicMethodBlock.get_special_ins(ins_idx)\n cblock['info_next'] = values.get_values()\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n last_instru = DVMBasicMethodBlockInstruction\n cblock['info_bb'] = 0\n if DVMBasicMethodBlock.childs:\n if len(DVMBasicMethodBlock.childs) > 1:\n cblock['info_bb'] = 1\n if last_instru.get_op_value() == 43 or last_instru.get_op_value(\n ) == 44:\n cblock['info_bb'] = 2\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n ok = False\n if DVMBasicMethodBlock.get_name() in hooks:\n if DVMBasicMethodBlockChild[-1] in hooks[DVMBasicMethodBlock\n .get_name()]:\n ok = True\n cblock['Edge'].append(hooks[DVMBasicMethodBlock.\n get_name()][0].get_name())\n if not ok:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n exception_analysis = DVMBasicMethodBlock.get_exception_analysis()\n if exception_analysis:\n cblock['Exceptions'] = exception_analysis.get()\n reports.append(cblock)\n reports.extend(l)\n return json.dumps(d)\n\n\nclass 
SV(object):\n\n def __init__(self, size, buff):\n self.__size = size\n self.__value = unpack(self.__size, buff)[0]\n\n def _get(self):\n return pack(self.__size, self.__value)\n\n def __str__(self):\n return '0x%x' % self.__value\n\n def __int__(self):\n return self.__value\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = attr\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n\n def _export(self):\n return [x for x in self.__value._fields]\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n\n def __str__(self):\n return self.__value.__str__()\n\n\n<mask token>\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, 'show_' + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, 'wb') as fd:\n fd.write(buff)\n\n\ndef FormatClassToJava(input):\n \"\"\"\n Transoform a typical xml format class into java format\n\n :param input: the input class name\n :rtype: string\n \"\"\"\n return 'L' + input.replace('.', '/') + ';'\n\n\ndef FormatClassToPython(input):\n i = input[:-1]\n i = i.replace('/', '_')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatNameToPython(input):\n i = input.replace('<', '')\n i = i.replace('>', '')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatDescriptorToPython(input):\n i = input.replace('/', '_')\n i = i.replace(';', '')\n i = i.replace('[', '')\n i = i.replace('(', '')\n i = i.replace(')', '')\n i = i.replace(' ', '')\n i = i.replace('$', '')\n return 
i\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n self.title = s\n self.children = []\n",
"step-4": "<mask token>\n\n\ndef disable_print_colors():\n colors = save_colors()\n remove_colors()\n return colors\n\n\ndef enable_print_colors(colors):\n enable_colors(colors)\n\n\n<mask token>\n\n\ndef Warning(msg):\n warning(msg)\n\n\ndef _PrintBanner():\n print_fct = CONF['PRINT_FCT']\n print_fct('*' * 75 + '\\n')\n\n\ndef _PrintSubBanner(title=None):\n print_fct = CONF['PRINT_FCT']\n if title == None:\n print_fct('#' * 20 + '\\n')\n else:\n print_fct('#' * 10 + ' ' + title + '\\n')\n\n\ndef _PrintNote(note, tab=0):\n print_fct = CONF['PRINT_FCT']\n note_color = CONF['COLORS']['NOTE']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct('\\t' * tab + '%s# %s%s' % (note_color, note, normal_color) + '\\n'\n )\n\n\n<mask token>\n\n\ndef _PrintXRef(tag, items):\n print_fct = CONF['PRINT_FCT']\n for i in items:\n print_fct('%s: %s %s %s %s\\n' % (tag, i[0].get_class_name(), i[0].\n get_name(), i[0].get_descriptor(), ' '.join('%x' % j.get_idx() for\n j in i[1])))\n\n\n<mask token>\n\n\ndef _PrintDefault(msg):\n print_fct = CONF['PRINT_FCT']\n print_fct(msg)\n\n\ndef PrettyShow(m_a, basic_blocks, notes={}):\n idx = 0\n nb = 0\n offset_color = CONF['COLORS']['OFFSET']\n offset_addr_color = CONF['COLORS']['OFFSET_ADDR']\n instruction_name_color = CONF['COLORS']['INSTRUCTION_NAME']\n branch_false_color = CONF['COLORS']['BRANCH_FALSE']\n branch_true_color = CONF['COLORS']['BRANCH_TRUE']\n branch_color = CONF['COLORS']['BRANCH']\n exception_color = CONF['COLORS']['EXCEPTION']\n bb_color = CONF['COLORS']['BB']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct = CONF['PRINT_FCT']\n colors = CONF['COLORS']['OUTPUT']\n for i in basic_blocks:\n print_fct('%s%s%s : \\n' % (bb_color, i.get_name(), normal_color))\n instructions = i.get_instructions()\n for ins in instructions:\n if nb in notes:\n for note in notes[nb]:\n _PrintNote(note, 1)\n print_fct('\\t%s%-3d%s(%s%08x%s) ' % (offset_color, nb,\n normal_color, offset_addr_color, idx, normal_color))\n print_fct('%s%-20s%s' % (instruction_name_color, ins.get_name(),\n normal_color))\n operands = ins.get_operands()\n print_fct('%s' % ', '.join(m_a.get_vm().colorize_operands(\n operands, colors)))\n op_value = ins.get_op_value()\n if ins == instructions[-1] and i.childs:\n print_fct(' ')\n if (op_value == 43 or op_value == 44) and len(i.childs) > 1:\n values = i.get_special_ins(idx).get_values()\n print_fct('%s[ D:%s%s ' % (branch_false_color, i.childs\n [0][2].get_name(), branch_color))\n print_fct(' '.join('%d:%s' % (values[j], i.childs[j + 1\n ][2].get_name()) for j in range(0, len(i.childs) - \n 1)) + ' ]%s' % normal_color)\n elif len(i.childs) == 2:\n print_fct('%s[ %s%s ' % (branch_false_color, i.childs[0\n ][2].get_name(), branch_true_color))\n print_fct(' '.join('%s' % c[2].get_name() for c in i.\n childs[1:]) + ' ]%s' % normal_color)\n else:\n print_fct('%s[ ' % branch_color + ' '.join('%s' % c[2].\n get_name() for c in i.childs) + ' ]%s' % normal_color)\n idx += ins.get_length()\n nb += 1\n print_fct('\\n')\n if i.get_exception_analysis():\n print_fct('\\t%s%s%s\\n' % (exception_color, i.exception_analysis\n .show_buff(), normal_color))\n print_fct('\\n')\n\n\nclass TmpBlock(object):\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\ndef method2json(mx, directed_graph=False):\n if directed_graph:\n return method2json_direct(mx)\n return method2json_undirect(mx)\n\n\ndef method2json_undirect(mx):\n d = {}\n reports = []\n d['reports'] = reports\n for DVMBasicMethodBlock in 
mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n cblock['instructions'].append(c_ins)\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n reports.append(cblock)\n return json.dumps(d)\n\n\ndef method2json_direct(mx):\n d = {}\n reports = []\n d['reports'] = reports\n hooks = {}\n l = []\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n for index, DVMBasicMethodBlockChild in enumerate(DVMBasicMethodBlock\n .childs):\n if DVMBasicMethodBlock.get_name() == DVMBasicMethodBlockChild[-1\n ].get_name():\n preblock = TmpBlock(DVMBasicMethodBlock.get_name() + '-pre')\n cnblock = {}\n cnblock['BasicBlockId'] = DVMBasicMethodBlock.get_name(\n ) + '-pre'\n cnblock['start'] = DVMBasicMethodBlock.start\n cnblock['notes'] = []\n cnblock['Edge'] = [DVMBasicMethodBlock.get_name()]\n cnblock['registers'] = 0\n cnblock['instructions'] = []\n cnblock['info_bb'] = 0\n l.append(cnblock)\n for parent in DVMBasicMethodBlock.fathers:\n hooks[parent[-1].get_name()] = []\n hooks[parent[-1].get_name()].append(preblock)\n for idx, child in enumerate(parent[-1].childs):\n if child[-1].get_name(\n ) == DVMBasicMethodBlock.get_name():\n hooks[parent[-1].get_name()].append(child[-1])\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['start'] = DVMBasicMethodBlock.start\n cblock['notes'] = DVMBasicMethodBlock.get_notes()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n last_instru = None\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n c_ins['formatted_operands'\n ] = DVMBasicMethodBlockInstruction.get_formatted_operands()\n cblock['instructions'].append(c_ins)\n if DVMBasicMethodBlockInstruction.get_op_value(\n ) == 43 or DVMBasicMethodBlockInstruction.get_op_value() == 44:\n values = DVMBasicMethodBlock.get_special_ins(ins_idx)\n cblock['info_next'] = values.get_values()\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n last_instru = DVMBasicMethodBlockInstruction\n cblock['info_bb'] = 0\n if DVMBasicMethodBlock.childs:\n if len(DVMBasicMethodBlock.childs) > 1:\n cblock['info_bb'] = 1\n if last_instru.get_op_value() == 43 or last_instru.get_op_value(\n ) == 44:\n cblock['info_bb'] = 2\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n ok = False\n if DVMBasicMethodBlock.get_name() in hooks:\n if DVMBasicMethodBlockChild[-1] in hooks[DVMBasicMethodBlock\n .get_name()]:\n ok = True\n cblock['Edge'].append(hooks[DVMBasicMethodBlock.\n get_name()][0].get_name())\n if not ok:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n exception_analysis = 
DVMBasicMethodBlock.get_exception_analysis()\n if exception_analysis:\n cblock['Exceptions'] = exception_analysis.get()\n reports.append(cblock)\n reports.extend(l)\n return json.dumps(d)\n\n\nclass SV(object):\n\n def __init__(self, size, buff):\n self.__size = size\n self.__value = unpack(self.__size, buff)[0]\n\n def _get(self):\n return pack(self.__size, self.__value)\n\n def __str__(self):\n return '0x%x' % self.__value\n\n def __int__(self):\n return self.__value\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = attr\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n\n def _export(self):\n return [x for x in self.__value._fields]\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n\n def __str__(self):\n return self.__value.__str__()\n\n\ndef object_to_bytes(obj):\n \"\"\"\n Convert a object to a bytearray or call get_raw() of the object\n if no useful type was found.\n \"\"\"\n if isinstance(obj, str):\n return bytearray(obj, 'UTF-8')\n elif isinstance(obj, bool):\n return bytearray()\n elif isinstance(obj, int):\n return pack('<L', obj)\n elif obj == None:\n return bytearray()\n elif isinstance(obj, bytearray):\n return obj\n else:\n return obj.get_raw()\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, 'show_' + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, 'wb') as fd:\n fd.write(buff)\n\n\ndef FormatClassToJava(input):\n \"\"\"\n Transoform a typical xml format 
class into java format\n\n :param input: the input class name\n :rtype: string\n \"\"\"\n return 'L' + input.replace('.', '/') + ';'\n\n\ndef FormatClassToPython(input):\n i = input[:-1]\n i = i.replace('/', '_')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatNameToPython(input):\n i = input.replace('<', '')\n i = i.replace('>', '')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatDescriptorToPython(input):\n i = input.replace('/', '_')\n i = i.replace(';', '')\n i = i.replace('[', '')\n i = i.replace('(', '')\n i = i.replace(')', '')\n i = i.replace(' ', '')\n i = i.replace('$', '')\n return i\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n self.title = s\n self.children = []\n",
"step-5": "from __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nimport hashlib\nfrom xml.sax.saxutils import escape\nfrom struct import unpack, pack\nimport textwrap\n\nimport json\nfrom .anconf import warning, error, CONF, enable_colors, remove_colors, save_colors, color_range\n\n\ndef disable_print_colors():\n colors = save_colors()\n remove_colors()\n return colors\n\n\ndef enable_print_colors(colors):\n enable_colors(colors)\n\n\n# Handle exit message\ndef Exit(msg):\n warning(\"Error : \" + msg)\n raise (\"oops\")\n\n\ndef Warning(msg):\n warning(msg)\n\n\ndef _PrintBanner():\n print_fct = CONF[\"PRINT_FCT\"]\n print_fct(\"*\" * 75 + \"\\n\")\n\n\ndef _PrintSubBanner(title=None):\n print_fct = CONF[\"PRINT_FCT\"]\n if title == None:\n print_fct(\"#\" * 20 + \"\\n\")\n else:\n print_fct(\"#\" * 10 + \" \" + title + \"\\n\")\n\n\ndef _PrintNote(note, tab=0):\n print_fct = CONF[\"PRINT_FCT\"]\n note_color = CONF[\"COLORS\"][\"NOTE\"]\n normal_color = CONF[\"COLORS\"][\"NORMAL\"]\n print_fct(\"\\t\" * tab + \"%s# %s%s\" % (note_color, note, normal_color) + \"\\n\")\n\n\n# Print arg into a correct format\ndef _Print(name, arg):\n buff = name + \" \"\n\n if type(arg).__name__ == 'int':\n buff += \"0x%x\" % arg\n elif type(arg).__name__ == 'long':\n buff += \"0x%x\" % arg\n elif type(arg).__name__ == 'str':\n buff += \"%s\" % arg\n elif isinstance(arg, SV):\n buff += \"0x%x\" % arg.get_value()\n elif isinstance(arg, SVs):\n buff += arg.get_value().__str__()\n\n print(buff)\n\n\ndef PrettyShowEx(exceptions):\n if len(exceptions) > 0:\n CONF[\"PRINT_FCT\"](\"Exceptions:\\n\")\n for i in exceptions:\n CONF[\"PRINT_FCT\"](\"\\t%s%s%s\\n\" %\n (CONF[\"COLORS\"][\"EXCEPTION\"], i.show_buff(),\n CONF[\"COLORS\"][\"NORMAL\"]))\n\n\ndef _PrintXRef(tag, items):\n print_fct = CONF[\"PRINT_FCT\"]\n for i in items:\n print_fct(\"%s: %s %s %s %s\\n\" %\n (tag, i[0].get_class_name(), i[0].get_name(),\n i[0].get_descriptor(), ' '.join(\"%x\" % j.get_idx()\n for j in i[1])))\n\n\ndef _PrintDRef(tag, items):\n print_fct = CONF[\"PRINT_FCT\"]\n for i in items:\n print_fct(\"%s: %s %s %s %s\\n\" %\n (tag, i[0].get_class_name(), i[0].get_name(),\n i[0].get_descriptor(), ' '.join(\"%x\" % j for j in i[1])))\n\n\ndef _PrintDefault(msg):\n print_fct = CONF[\"PRINT_FCT\"]\n print_fct(msg)\n\n\ndef PrettyShow(m_a, basic_blocks, notes={}):\n idx = 0\n nb = 0\n\n offset_color = CONF[\"COLORS\"][\"OFFSET\"]\n offset_addr_color = CONF[\"COLORS\"][\"OFFSET_ADDR\"]\n instruction_name_color = CONF[\"COLORS\"][\"INSTRUCTION_NAME\"]\n branch_false_color = CONF[\"COLORS\"][\"BRANCH_FALSE\"]\n branch_true_color = CONF[\"COLORS\"][\"BRANCH_TRUE\"]\n branch_color = CONF[\"COLORS\"][\"BRANCH\"]\n exception_color = CONF[\"COLORS\"][\"EXCEPTION\"]\n bb_color = CONF[\"COLORS\"][\"BB\"]\n normal_color = CONF[\"COLORS\"][\"NORMAL\"]\n print_fct = CONF[\"PRINT_FCT\"]\n\n colors = CONF[\"COLORS\"][\"OUTPUT\"]\n\n for i in basic_blocks:\n print_fct(\"%s%s%s : \\n\" % (bb_color, i.get_name(), normal_color))\n instructions = i.get_instructions()\n for ins in instructions:\n if nb in notes:\n for note in notes[nb]:\n _PrintNote(note, 1)\n\n print_fct(\"\\t%s%-3d%s(%s%08x%s) \" %\n (offset_color, nb, normal_color, offset_addr_color, idx,\n normal_color))\n print_fct(\"%s%-20s%s\" %\n (instruction_name_color, ins.get_name(), normal_color))\n\n operands = ins.get_operands()\n print_fct(\n \"%s\" %\n \", 
\".join(m_a.get_vm().colorize_operands(operands, colors)))\n\n op_value = ins.get_op_value()\n if ins == instructions[-1] and i.childs:\n print_fct(\" \")\n\n # packed/sparse-switch\n if (op_value == 0x2b or op_value == 0x2c) and len(i.childs) > 1:\n values = i.get_special_ins(idx).get_values()\n print_fct(\"%s[ D:%s%s \" %\n (branch_false_color, i.childs[0][2].get_name(),\n branch_color))\n print_fct(' '.join(\"%d:%s\" % (\n values[j], i.childs[j + 1][2].get_name()) for j in\n range(0, len(i.childs) - 1)) + \" ]%s\" %\n normal_color)\n else:\n if len(i.childs) == 2:\n print_fct(\"%s[ %s%s \" % (branch_false_color,\n i.childs[0][2].get_name(),\n branch_true_color))\n print_fct(' '.join(\"%s\" % c[2].get_name(\n ) for c in i.childs[1:]) + \" ]%s\" % normal_color)\n else:\n print_fct(\"%s[ \" % branch_color + ' '.join(\n \"%s\" % c[2].get_name() for c in i.childs) + \" ]%s\" %\n normal_color)\n\n idx += ins.get_length()\n nb += 1\n\n print_fct(\"\\n\")\n\n if i.get_exception_analysis():\n print_fct(\"\\t%s%s%s\\n\" %\n (exception_color, i.exception_analysis.show_buff(),\n normal_color))\n\n print_fct(\"\\n\")\n\n\nclass TmpBlock(object):\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\ndef method2json(mx, directed_graph=False):\n if directed_graph:\n return method2json_direct(mx)\n return method2json_undirect(mx)\n\n\ndef method2json_undirect(mx):\n d = {}\n reports = []\n d[\"reports\"] = reports\n\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n\n cblock[\"BasicBlockId\"] = DVMBasicMethodBlock.get_name()\n cblock[\"registers\"] = mx.get_method().get_code().get_registers_size()\n cblock[\"instructions\"] = []\n\n ins_idx = DVMBasicMethodBlock.start\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins[\"idx\"] = ins_idx\n c_ins[\"name\"] = DVMBasicMethodBlockInstruction.get_name()\n c_ins[\"operands\"] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n\n cblock[\"instructions\"].append(c_ins)\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n\n cblock[\"Edge\"] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n cblock[\"Edge\"].append(DVMBasicMethodBlockChild[-1].get_name())\n\n reports.append(cblock)\n\n return json.dumps(d)\n\n\ndef method2json_direct(mx):\n d = {}\n reports = []\n d[\"reports\"] = reports\n\n hooks = {}\n\n l = []\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n for index, DVMBasicMethodBlockChild in enumerate(\n DVMBasicMethodBlock.childs):\n if DVMBasicMethodBlock.get_name(\n ) == DVMBasicMethodBlockChild[-1].get_name():\n\n preblock = TmpBlock(DVMBasicMethodBlock.get_name() + \"-pre\")\n\n cnblock = {}\n cnblock[\"BasicBlockId\"] = DVMBasicMethodBlock.get_name(\n ) + \"-pre\"\n cnblock[\"start\"] = DVMBasicMethodBlock.start\n cnblock[\"notes\"] = []\n\n cnblock[\"Edge\"] = [DVMBasicMethodBlock.get_name()]\n cnblock[\"registers\"] = 0\n cnblock[\"instructions\"] = []\n cnblock[\"info_bb\"] = 0\n\n l.append(cnblock)\n\n for parent in DVMBasicMethodBlock.fathers:\n hooks[parent[-1].get_name()] = []\n hooks[parent[-1].get_name()].append(preblock)\n\n for idx, child in enumerate(parent[-1].childs):\n if child[-1].get_name() == DVMBasicMethodBlock.get_name(\n ):\n hooks[parent[-1].get_name()].append(child[-1])\n\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n\n cblock[\"BasicBlockId\"] = DVMBasicMethodBlock.get_name()\n cblock[\"start\"] = DVMBasicMethodBlock.start\n cblock[\"notes\"] = 
DVMBasicMethodBlock.get_notes()\n\n cblock[\"registers\"] = mx.get_method().get_code().get_registers_size()\n cblock[\"instructions\"] = []\n\n ins_idx = DVMBasicMethodBlock.start\n last_instru = None\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins[\"idx\"] = ins_idx\n c_ins[\"name\"] = DVMBasicMethodBlockInstruction.get_name()\n c_ins[\"operands\"] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n\n c_ins[\"formatted_operands\"\n ] = DVMBasicMethodBlockInstruction.get_formatted_operands()\n\n cblock[\"instructions\"].append(c_ins)\n\n if (DVMBasicMethodBlockInstruction.get_op_value() == 0x2b or\n DVMBasicMethodBlockInstruction.get_op_value() == 0x2c):\n values = DVMBasicMethodBlock.get_special_ins(ins_idx)\n cblock[\"info_next\"] = values.get_values()\n\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n last_instru = DVMBasicMethodBlockInstruction\n\n cblock[\"info_bb\"] = 0\n if DVMBasicMethodBlock.childs:\n if len(DVMBasicMethodBlock.childs) > 1:\n cblock[\"info_bb\"] = 1\n\n if (last_instru.get_op_value() == 0x2b or\n last_instru.get_op_value() == 0x2c):\n cblock[\"info_bb\"] = 2\n\n cblock[\"Edge\"] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n ok = False\n if DVMBasicMethodBlock.get_name() in hooks:\n if DVMBasicMethodBlockChild[-1] in hooks[\n DVMBasicMethodBlock.get_name()\n ]:\n ok = True\n cblock[\"Edge\"].append(hooks[DVMBasicMethodBlock.get_name(\n )][0].get_name())\n\n if not ok:\n cblock[\"Edge\"].append(DVMBasicMethodBlockChild[-1].get_name())\n\n exception_analysis = DVMBasicMethodBlock.get_exception_analysis()\n if exception_analysis:\n cblock[\"Exceptions\"] = exception_analysis.get()\n\n reports.append(cblock)\n\n reports.extend(l)\n\n return json.dumps(d)\n\n\nclass SV(object):\n\n def __init__(self, size, buff):\n self.__size = size\n self.__value = unpack(self.__size, buff)[0]\n\n def _get(self):\n return pack(self.__size, self.__value)\n\n def __str__(self):\n return \"0x%x\" % self.__value\n\n def __int__(self):\n return self.__value\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = attr\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n\n def _export(self):\n return [x for x in self.__value._fields]\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n\n def __str__(self):\n return self.__value.__str__()\n\n\ndef object_to_bytes(obj):\n \"\"\"\n Convert a object to a bytearray or call get_raw() of the object\n if no useful type was found.\n \"\"\"\n if isinstance(obj, str):\n return bytearray(obj, \"UTF-8\")\n elif isinstance(obj, bool):\n return bytearray()\n elif isinstance(obj, int):\n return pack(\"<L\", obj)\n elif obj == None:\n return bytearray()\n elif isinstance(obj, bytearray):\n return obj\n else:\n #print type(obj), obj\n return obj.get_raw()\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, \"show_\" + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n 
self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, \"wb\") as fd:\n fd.write(buff)\n\n\ndef FormatClassToJava(input):\n \"\"\"\n Transoform a typical xml format class into java format\n\n :param input: the input class name\n :rtype: string\n \"\"\"\n return \"L\" + input.replace(\".\", \"/\") + \";\"\n\n\ndef FormatClassToPython(input):\n i = input[:-1]\n i = i.replace(\"/\", \"_\")\n i = i.replace(\"$\", \"_\")\n\n return i\n\n\ndef FormatNameToPython(input):\n i = input.replace(\"<\", \"\")\n i = i.replace(\">\", \"\")\n i = i.replace(\"$\", \"_\")\n\n return i\n\n\ndef FormatDescriptorToPython(input):\n i = input.replace(\"/\", \"_\")\n i = i.replace(\";\", \"\")\n i = i.replace(\"[\", \"\")\n i = i.replace(\"(\", \"\")\n i = i.replace(\")\", \"\")\n i = i.replace(\" \", \"\")\n i = i.replace(\"$\", \"\")\n\n return i\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n self.title = s\n self.children = []\n",
"step-ids": [
35,
62,
63,
65,
71
]
}
|
[
35,
62,
63,
65,
71
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
pos_training_path = 'dataset-1/trainset/faces'
neg_training_path = 'dataset-1/trainset/non-faces'
pos_testing_path = 'dataset-1/testset/faces'
neg_testing_path = 'dataset-1/testset/non-faces'
print('Loading training faces..')
faces_train = UT.load_images(pos_training_path)
faces_train_int = list(map(II.to_integral, faces_train))
print('..done. ' + str(len(faces_train)) +
' faces loaded.\n\nLoading non faces..')
non_faces_train = UT.load_images(neg_training_path)
non_faces_train_int = list(map(II.to_integral, non_faces_train))
print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\n')
num_classifiers = 5
min_feature_height = 6
max_feature_height = 8
min_feature_width = 6
max_feature_width = 8
classifiers = AB.learn(faces_train_int, non_faces_train_int,
num_classifiers, min_feature_height, max_feature_height,
min_feature_width, max_feature_width)
for n in range(len(classifiers)):
print(classifiers[n].type, classifiers[n].top_left, classifiers[n].
width, classifiers[n].height, classifiers[n].threshold)
print('Loading test faces')
faces_test = UT.load_images(pos_testing_path)
faces_test_int = list(map(II.to_integral, faces_test))
print(str(len(faces_test)) + ' faces loaded.\n\nLoading test non faces..')
non_faces_test = UT.load_images(neg_testing_path)
non_faces_test_int = list(map(II.to_integral, non_faces_test))
print(str(len(non_faces_test)) + ' non faces loaded.\n')
print('Testing selected classifiers..')
correct_faces = 0
correct_non_faces = 0
correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int,
non_faces_test_int, classifiers)
print('..done.\n\nResult:\n Faces: ' + str(correct_faces) + '/' +
str(len(faces_test)) + ' (' + str(float(correct_faces) / len(
faces_test) * 100) + '%)\n non-Faces: ' + str(correct_non_faces) +
'/' + str(len(non_faces_test)) + ' (' + str(float(
correct_non_faces) / len(non_faces_test) * 100) + '%)')
print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test)) +
' (' + str(float(FN) / len(faces_test) * 100) +
"""%)
False Positive Rate: """ + str(FP) + '/' + str(len(
non_faces_test)) + ' (' + str(float(FP) / len(non_faces_test) *
100) + '%)')
<|reserved_special_token_1|>
import src.integralimage as II
import src.adaboost as AB
import src.utils as UT
import numpy as np
if __name__ == '__main__':
pos_training_path = 'dataset-1/trainset/faces'
neg_training_path = 'dataset-1/trainset/non-faces'
pos_testing_path = 'dataset-1/testset/faces'
neg_testing_path = 'dataset-1/testset/non-faces'
print('Loading training faces..')
faces_train = UT.load_images(pos_training_path)
faces_train_int = list(map(II.to_integral, faces_train))
print('..done. ' + str(len(faces_train)) +
' faces loaded.\n\nLoading non faces..')
non_faces_train = UT.load_images(neg_training_path)
non_faces_train_int = list(map(II.to_integral, non_faces_train))
print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\n')
num_classifiers = 5
min_feature_height = 6
max_feature_height = 8
min_feature_width = 6
max_feature_width = 8
classifiers = AB.learn(faces_train_int, non_faces_train_int,
num_classifiers, min_feature_height, max_feature_height,
min_feature_width, max_feature_width)
for n in range(len(classifiers)):
print(classifiers[n].type, classifiers[n].top_left, classifiers[n].
width, classifiers[n].height, classifiers[n].threshold)
print('Loading test faces')
faces_test = UT.load_images(pos_testing_path)
faces_test_int = list(map(II.to_integral, faces_test))
print(str(len(faces_test)) + ' faces loaded.\n\nLoading test non faces..')
non_faces_test = UT.load_images(neg_testing_path)
non_faces_test_int = list(map(II.to_integral, non_faces_test))
print(str(len(non_faces_test)) + ' non faces loaded.\n')
print('Testing selected classifiers..')
correct_faces = 0
correct_non_faces = 0
correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int,
non_faces_test_int, classifiers)
print('..done.\n\nResult:\n Faces: ' + str(correct_faces) + '/' +
str(len(faces_test)) + ' (' + str(float(correct_faces) / len(
faces_test) * 100) + '%)\n non-Faces: ' + str(correct_non_faces) +
'/' + str(len(non_faces_test)) + ' (' + str(float(
correct_non_faces) / len(non_faces_test) * 100) + '%)')
print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test)) +
' (' + str(float(FN) / len(faces_test) * 100) +
"""%)
False Positive Rate: """ + str(FP) + '/' + str(len(
non_faces_test)) + ' (' + str(float(FP) / len(non_faces_test) *
100) + '%)')
<|reserved_special_token_1|>
import src.integralimage as II
import src.adaboost as AB
import src.utils as UT
import numpy as np
if __name__ == "__main__":
pos_training_path = 'dataset-1/trainset/faces'
neg_training_path = 'dataset-1/trainset/non-faces'
pos_testing_path = 'dataset-1/testset/faces'
neg_testing_path = 'dataset-1/testset/non-faces'
print('Loading training faces..')
faces_train = UT.load_images(pos_training_path)
faces_train_int = list(map(II.to_integral, faces_train))
print('..done. ' + str(len(faces_train)) + ' faces loaded.\n\nLoading non faces..')
non_faces_train = UT.load_images(neg_training_path)
non_faces_train_int = list(map(II.to_integral, non_faces_train))
print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\n')
#number of rounds: default is 5
num_classifiers = 5
# For performance reasons restricting feature size
min_feature_height = 6
max_feature_height = 8
min_feature_width = 6
max_feature_width = 8
#learn algorithm
classifiers = AB.learn(faces_train_int, non_faces_train_int, num_classifiers, min_feature_height, max_feature_height, min_feature_width, max_feature_width)
for n in range(len(classifiers)):
print(classifiers[n].type, classifiers[n].top_left, classifiers[n].width, classifiers[n].height, classifiers[n].threshold)
print('Loading test faces')
faces_test = UT.load_images(pos_testing_path)
faces_test_int = list(map(II.to_integral, faces_test))
print(str(len(faces_test)) + ' faces loaded.\n\nLoading test non faces..')
non_faces_test = UT.load_images(neg_testing_path)
non_faces_test_int = list(map(II.to_integral, non_faces_test))
print(str(len(non_faces_test)) + ' non faces loaded.\n')
print('Testing selected classifiers..')
correct_faces = 0
correct_non_faces = 0
correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int, non_faces_test_int, classifiers)
print('..done.\n\nResult:\n Faces: ' + str(correct_faces) + '/' + str(len(faces_test))
+ ' (' + str((float(correct_faces) / len(faces_test)) * 100) + '%)\n non-Faces: '
+ str(correct_non_faces) + '/' + str(len(non_faces_test)) + ' ('
+ str((float(correct_non_faces) / len(non_faces_test)) * 100) + '%)')
print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test))
+ ' (' + str((float(FN) / len(faces_test)) * 100) + '%)\n False Positive Rate: '
+ str(FP) + '/' + str(len(non_faces_test)) + ' ('
+ str((float(FP) / len(non_faces_test)) * 100) + '%)')
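# --- Illustrative sketch (an assumption, not the actual implementation in
# src.integralimage): II.to_integral is typically a 2-D cumulative sum, so each
# cell holds the sum of all pixels above and to the left of it, which lets
# Haar-like features be evaluated with a handful of table lookups.
def to_integral_sketch(img):
    arr = np.asarray(img, dtype=np.float64)
    return np.cumsum(np.cumsum(arr, axis=0), axis=1)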
|
flexible
|
{
"blob_id": "3f4f60ff315c8e7e4637a84629894012ed13280e",
"index": 3163,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pos_training_path = 'dataset-1/trainset/faces'\n neg_training_path = 'dataset-1/trainset/non-faces'\n pos_testing_path = 'dataset-1/testset/faces'\n neg_testing_path = 'dataset-1/testset/non-faces'\n print('Loading training faces..')\n faces_train = UT.load_images(pos_training_path)\n faces_train_int = list(map(II.to_integral, faces_train))\n print('..done. ' + str(len(faces_train)) +\n ' faces loaded.\\n\\nLoading non faces..')\n non_faces_train = UT.load_images(neg_training_path)\n non_faces_train_int = list(map(II.to_integral, non_faces_train))\n print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\\n')\n num_classifiers = 5\n min_feature_height = 6\n max_feature_height = 8\n min_feature_width = 6\n max_feature_width = 8\n classifiers = AB.learn(faces_train_int, non_faces_train_int,\n num_classifiers, min_feature_height, max_feature_height,\n min_feature_width, max_feature_width)\n for n in range(len(classifiers)):\n print(classifiers[n].type, classifiers[n].top_left, classifiers[n].\n width, classifiers[n].height, classifiers[n].threshold)\n print('Loading test faces')\n faces_test = UT.load_images(pos_testing_path)\n faces_test_int = list(map(II.to_integral, faces_test))\n print(str(len(faces_test)) + ' faces loaded.\\n\\nLoading test non faces..')\n non_faces_test = UT.load_images(neg_testing_path)\n non_faces_test_int = list(map(II.to_integral, non_faces_test))\n print(str(len(non_faces_test)) + ' non faces loaded.\\n')\n print('Testing selected classifiers..')\n correct_faces = 0\n correct_non_faces = 0\n correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int,\n non_faces_test_int, classifiers)\n print('..done.\\n\\nResult:\\n Faces: ' + str(correct_faces) + '/' +\n str(len(faces_test)) + ' (' + str(float(correct_faces) / len(\n faces_test) * 100) + '%)\\n non-Faces: ' + str(correct_non_faces) +\n '/' + str(len(non_faces_test)) + ' (' + str(float(\n correct_non_faces) / len(non_faces_test) * 100) + '%)')\n print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test)) +\n ' (' + str(float(FN) / len(faces_test) * 100) +\n \"\"\"%)\n False Positive Rate: \"\"\" + str(FP) + '/' + str(len(\n non_faces_test)) + ' (' + str(float(FP) / len(non_faces_test) * \n 100) + '%)')\n",
"step-3": "import src.integralimage as II\nimport src.adaboost as AB\nimport src.utils as UT\nimport numpy as np\nif __name__ == '__main__':\n pos_training_path = 'dataset-1/trainset/faces'\n neg_training_path = 'dataset-1/trainset/non-faces'\n pos_testing_path = 'dataset-1/testset/faces'\n neg_testing_path = 'dataset-1/testset/non-faces'\n print('Loading training faces..')\n faces_train = UT.load_images(pos_training_path)\n faces_train_int = list(map(II.to_integral, faces_train))\n print('..done. ' + str(len(faces_train)) +\n ' faces loaded.\\n\\nLoading non faces..')\n non_faces_train = UT.load_images(neg_training_path)\n non_faces_train_int = list(map(II.to_integral, non_faces_train))\n print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\\n')\n num_classifiers = 5\n min_feature_height = 6\n max_feature_height = 8\n min_feature_width = 6\n max_feature_width = 8\n classifiers = AB.learn(faces_train_int, non_faces_train_int,\n num_classifiers, min_feature_height, max_feature_height,\n min_feature_width, max_feature_width)\n for n in range(len(classifiers)):\n print(classifiers[n].type, classifiers[n].top_left, classifiers[n].\n width, classifiers[n].height, classifiers[n].threshold)\n print('Loading test faces')\n faces_test = UT.load_images(pos_testing_path)\n faces_test_int = list(map(II.to_integral, faces_test))\n print(str(len(faces_test)) + ' faces loaded.\\n\\nLoading test non faces..')\n non_faces_test = UT.load_images(neg_testing_path)\n non_faces_test_int = list(map(II.to_integral, non_faces_test))\n print(str(len(non_faces_test)) + ' non faces loaded.\\n')\n print('Testing selected classifiers..')\n correct_faces = 0\n correct_non_faces = 0\n correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int,\n non_faces_test_int, classifiers)\n print('..done.\\n\\nResult:\\n Faces: ' + str(correct_faces) + '/' +\n str(len(faces_test)) + ' (' + str(float(correct_faces) / len(\n faces_test) * 100) + '%)\\n non-Faces: ' + str(correct_non_faces) +\n '/' + str(len(non_faces_test)) + ' (' + str(float(\n correct_non_faces) / len(non_faces_test) * 100) + '%)')\n print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test)) +\n ' (' + str(float(FN) / len(faces_test) * 100) +\n \"\"\"%)\n False Positive Rate: \"\"\" + str(FP) + '/' + str(len(\n non_faces_test)) + ' (' + str(float(FP) / len(non_faces_test) * \n 100) + '%)')\n",
"step-4": "import src.integralimage as II\nimport src.adaboost as AB\nimport src.utils as UT\nimport numpy as np \n\nif __name__ == \"__main__\":\n pos_training_path = 'dataset-1/trainset/faces'\n neg_training_path = 'dataset-1/trainset/non-faces'\n pos_testing_path = 'dataset-1/testset/faces'\n neg_testing_path = 'dataset-1/testset/non-faces'\n\n print('Loading training faces..')\n faces_train = UT.load_images(pos_training_path)\n faces_train_int = list(map(II.to_integral, faces_train))\n print('..done. ' + str(len(faces_train)) + ' faces loaded.\\n\\nLoading non faces..')\n non_faces_train = UT.load_images(neg_training_path)\n non_faces_train_int = list(map(II.to_integral, non_faces_train))\n print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\\n')\n\n #number of rounds: default is 5\n num_classifiers = 5\n # For performance reasons restricting feature size\n min_feature_height = 6\n max_feature_height = 8\n min_feature_width = 6\n max_feature_width = 8\n \n #learn algorithm\n classifiers = AB.learn(faces_train_int, non_faces_train_int, num_classifiers, min_feature_height, max_feature_height, min_feature_width, max_feature_width)\n for n in range(len(classifiers)):\n print(classifiers[n].type, classifiers[n].top_left, classifiers[n].width, classifiers[n].height, classifiers[n].threshold)\n\n print('Loading test faces')\n faces_test = UT.load_images(pos_testing_path)\n faces_test_int = list(map(II.to_integral, faces_test))\n print(str(len(faces_test)) + ' faces loaded.\\n\\nLoading test non faces..')\n non_faces_test = UT.load_images(neg_testing_path)\n non_faces_test_int = list(map(II.to_integral, non_faces_test))\n print(str(len(non_faces_test)) + ' non faces loaded.\\n')\n \n print('Testing selected classifiers..')\n correct_faces = 0\n correct_non_faces = 0\n correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int, non_faces_test_int, classifiers)\n\n print('..done.\\n\\nResult:\\n Faces: ' + str(correct_faces) + '/' + str(len(faces_test))\n + ' (' + str((float(correct_faces) / len(faces_test)) * 100) + '%)\\n non-Faces: '\n + str(correct_non_faces) + '/' + str(len(non_faces_test)) + ' ('\n + str((float(correct_non_faces) / len(non_faces_test)) * 100) + '%)')\n print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test))\n + ' (' + str((float(FN) / len(faces_test)) * 100) + '%)\\n False Positive Rate: '\n + str(FP) + '/' + str(len(non_faces_test)) + ' ('\n + str((float(FP) / len(non_faces_test)) * 100) + '%)')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import torch
import numpy as np
import torch.utils.data as data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import time
class CNN(nn.Module):
def __init__(self, fragment_length, conv_layers_num, conv_kernel_size,
pool_kernel_size, fc_size, conv_dilation=1, pool_dilation=1,
conv_stride=1, pool_stride=2):
super(CNN, self).__init__()
self.input_channels = 4
self.fragment_length = fragment_length
self.conv_layers_num = conv_layers_num
self.conv_kernel_size = conv_kernel_size
self.pool_kernel_size = pool_kernel_size
self.conv1 = nn.Conv1d(in_channels=self.input_channels,
out_channels=self.conv_layers_num, kernel_size=self.
conv_kernel_size, stride=conv_stride, dilation=conv_dilation)
self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size, stride=
pool_stride, dilation=pool_dilation)
size_after_conv = (self.fragment_length + 2 * 0 - conv_dilation * (
self.conv_kernel_size - 1) - 1) / conv_stride + 1
size_after_pool = (size_after_conv + 2 * 0 - pool_dilation * (self.
pool_kernel_size - 1) - 1) / pool_stride + 1
self.dropout = nn.Dropout()
self.input_fc = int(size_after_pool) * self.conv_layers_num
self.output_fc = fc_size
self.fc1 = nn.Linear(self.input_fc, self.output_fc)
self.fc2 = nn.Linear(self.output_fc, 2)
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
conv_result = self.conv1(x)
relu_result = F.relu(conv_result)
pooling_result = self.pool(relu_result)
fc_input = pooling_result.view(-1, self.input_fc)
dropout_result1 = self.dropout(fc_input)
fc_result1 = self.fc1(dropout_result1)
relu_result1 = F.relu(fc_result1)
dropout_result2 = self.dropout(relu_result1)
fc_result2 = self.fc2(dropout_result2)
relu_result2 = F.relu(fc_result2)
result = self.softmax(relu_result2)
return result
|
normal
|
{
"blob_id": "415a6cf1c3f633a863851a4a407d416355398b39",
"index": 7732,
"step-1": "<mask token>\n\n\nclass CNN(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CNN(nn.Module):\n\n def __init__(self, fragment_length, conv_layers_num, conv_kernel_size,\n pool_kernel_size, fc_size, conv_dilation=1, pool_dilation=1,\n conv_stride=1, pool_stride=2):\n super(CNN, self).__init__()\n self.input_channels = 4\n self.fragment_length = fragment_length\n self.conv_layers_num = conv_layers_num\n self.conv_kernel_size = conv_kernel_size\n self.pool_kernel_size = pool_kernel_size\n self.conv1 = nn.Conv1d(in_channels=self.input_channels,\n out_channels=self.conv_layers_num, kernel_size=self.\n conv_kernel_size, stride=conv_stride, dilation=conv_dilation)\n self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size, stride=\n pool_stride, dilation=pool_dilation)\n size_after_conv = (self.fragment_length + 2 * 0 - conv_dilation * (\n self.conv_kernel_size - 1) - 1) / conv_stride + 1\n size_after_pool = (size_after_conv + 2 * 0 - pool_dilation * (self.\n pool_kernel_size - 1) - 1) / pool_stride + 1\n self.dropout = nn.Dropout()\n self.input_fc = int(size_after_pool) * self.conv_layers_num\n self.output_fc = fc_size\n self.fc1 = nn.Linear(self.input_fc, self.output_fc)\n self.fc2 = nn.Linear(self.output_fc, 2)\n self.softmax = torch.nn.Softmax(dim=1)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CNN(nn.Module):\n\n def __init__(self, fragment_length, conv_layers_num, conv_kernel_size,\n pool_kernel_size, fc_size, conv_dilation=1, pool_dilation=1,\n conv_stride=1, pool_stride=2):\n super(CNN, self).__init__()\n self.input_channels = 4\n self.fragment_length = fragment_length\n self.conv_layers_num = conv_layers_num\n self.conv_kernel_size = conv_kernel_size\n self.pool_kernel_size = pool_kernel_size\n self.conv1 = nn.Conv1d(in_channels=self.input_channels,\n out_channels=self.conv_layers_num, kernel_size=self.\n conv_kernel_size, stride=conv_stride, dilation=conv_dilation)\n self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size, stride=\n pool_stride, dilation=pool_dilation)\n size_after_conv = (self.fragment_length + 2 * 0 - conv_dilation * (\n self.conv_kernel_size - 1) - 1) / conv_stride + 1\n size_after_pool = (size_after_conv + 2 * 0 - pool_dilation * (self.\n pool_kernel_size - 1) - 1) / pool_stride + 1\n self.dropout = nn.Dropout()\n self.input_fc = int(size_after_pool) * self.conv_layers_num\n self.output_fc = fc_size\n self.fc1 = nn.Linear(self.input_fc, self.output_fc)\n self.fc2 = nn.Linear(self.output_fc, 2)\n self.softmax = torch.nn.Softmax(dim=1)\n\n def forward(self, x):\n conv_result = self.conv1(x)\n relu_result = F.relu(conv_result)\n pooling_result = self.pool(relu_result)\n fc_input = pooling_result.view(-1, self.input_fc)\n dropout_result1 = self.dropout(fc_input)\n fc_result1 = self.fc1(dropout_result1)\n relu_result1 = F.relu(fc_result1)\n dropout_result2 = self.dropout(relu_result1)\n fc_result2 = self.fc2(dropout_result2)\n relu_result2 = F.relu(fc_result2)\n result = self.softmax(relu_result2)\n return result\n",
"step-4": "import torch\nimport numpy as np\nimport torch.utils.data as data\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport time\n\n\nclass CNN(nn.Module):\n\n def __init__(self, fragment_length, conv_layers_num, conv_kernel_size,\n pool_kernel_size, fc_size, conv_dilation=1, pool_dilation=1,\n conv_stride=1, pool_stride=2):\n super(CNN, self).__init__()\n self.input_channels = 4\n self.fragment_length = fragment_length\n self.conv_layers_num = conv_layers_num\n self.conv_kernel_size = conv_kernel_size\n self.pool_kernel_size = pool_kernel_size\n self.conv1 = nn.Conv1d(in_channels=self.input_channels,\n out_channels=self.conv_layers_num, kernel_size=self.\n conv_kernel_size, stride=conv_stride, dilation=conv_dilation)\n self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size, stride=\n pool_stride, dilation=pool_dilation)\n size_after_conv = (self.fragment_length + 2 * 0 - conv_dilation * (\n self.conv_kernel_size - 1) - 1) / conv_stride + 1\n size_after_pool = (size_after_conv + 2 * 0 - pool_dilation * (self.\n pool_kernel_size - 1) - 1) / pool_stride + 1\n self.dropout = nn.Dropout()\n self.input_fc = int(size_after_pool) * self.conv_layers_num\n self.output_fc = fc_size\n self.fc1 = nn.Linear(self.input_fc, self.output_fc)\n self.fc2 = nn.Linear(self.output_fc, 2)\n self.softmax = torch.nn.Softmax(dim=1)\n\n def forward(self, x):\n conv_result = self.conv1(x)\n relu_result = F.relu(conv_result)\n pooling_result = self.pool(relu_result)\n fc_input = pooling_result.view(-1, self.input_fc)\n dropout_result1 = self.dropout(fc_input)\n fc_result1 = self.fc1(dropout_result1)\n relu_result1 = F.relu(fc_result1)\n dropout_result2 = self.dropout(relu_result1)\n fc_result2 = self.fc2(dropout_result2)\n relu_result2 = F.relu(fc_result2)\n result = self.softmax(relu_result2)\n return result\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def boxes_to_obj(self, bound):
return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':
bound.vertices[0].y, 'y2': bound.vertices[2].y}
def generateTempFolder(self, prifx, src):
"""Creating temp directory.."""
print('Creating temp directory.. with src and prefix .. ', prifx, src)
temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(
':', '-'), prifx, src)
print('Temp directory created', temp_dir)
return temp_dir
def createSubDir(self, src, subDirNameList):
print('Creating a subdirectory..')
for subfolder_name in subDirNameList:
os.makedirs(os.path.join(src, subfolder_name))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def OCRscan(self, imgfile):
print('Performing OCR Scan on the image ', imgfile)
with io.open(imgfile, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response_with_text = client.document_text_detection(image=image)
document = response_with_text.full_text_annotation
return document
def boxes_to_obj(self, bound):
return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':
bound.vertices[0].y, 'y2': bound.vertices[2].y}
def generateTempFolder(self, prifx, src):
"""Creating temp directory.."""
print('Creating temp directory.. with src and prefix .. ', prifx, src)
temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(
':', '-'), prifx, src)
print('Temp directory created', temp_dir)
return temp_dir
def createSubDir(self, src, subDirNameList):
print('Creating a subdirectory..')
for subfolder_name in subDirNameList:
os.makedirs(os.path.join(src, subfolder_name))
def getFilesindir(self, dire):
print('Fetching the file in the directory')
print(dire)
return os.listdir(dire)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
credentials = service_account.Credentials.from_service_account_file(
'APIKey.json')
client = vision.ImageAnnotatorClient(credentials=credentials)
def OCRscan(self, imgfile):
print('Performing OCR Scan on the image ', imgfile)
with io.open(imgfile, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response_with_text = client.document_text_detection(image=image)
document = response_with_text.full_text_annotation
return document
def boxes_to_obj(self, bound):
return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':
bound.vertices[0].y, 'y2': bound.vertices[2].y}
def generateTempFolder(self, prifx, src):
"""Creating temp directory.."""
print('Creating temp directory.. with src and prefix .. ', prifx, src)
temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(
':', '-'), prifx, src)
print('Temp directory created', temp_dir)
return temp_dir
def createSubDir(self, src, subDirNameList):
print('Creating a subdirectory..')
for subfolder_name in subDirNameList:
os.makedirs(os.path.join(src, subfolder_name))
def getFilesindir(self, dire):
print('Fetching the file in the directory')
print(dire)
return os.listdir(dire)
<|reserved_special_token_1|>
from google.cloud import vision
from google.cloud.vision import types
from google.oauth2 import service_account
import os
import io
import pdf2image
import tempfile
import datetime
credentials = service_account.Credentials.from_service_account_file(
'APIKey.json')
client = vision.ImageAnnotatorClient(credentials=credentials)
def OCRscan(self, imgfile):
print('Performing OCR Scan on the image ', imgfile)
with io.open(imgfile, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response_with_text = client.document_text_detection(image=image)
document = response_with_text.full_text_annotation
return document
def boxes_to_obj(self, bound):
return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':
bound.vertices[0].y, 'y2': bound.vertices[2].y}
def generateTempFolder(self, prifx, src):
"""Creating temp directory.."""
print('Creating temp directory.. with src and prefix .. ', prifx, src)
temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(
':', '-'), prifx, src)
print('Temp directory created', temp_dir)
return temp_dir
def createSubDir(self, src, subDirNameList):
print('Creating a subdirectory..')
for subfolder_name in subDirNameList:
os.makedirs(os.path.join(src, subfolder_name))
def getFilesindir(self, dire):
print('Fetching the file in the directory')
print(dire)
return os.listdir(dire)
<|reserved_special_token_1|>
from google.cloud import vision
from google.cloud.vision import types
from google.oauth2 import service_account
import os
# import re
import io
import pdf2image
import tempfile
import datetime
# Google API
credentials = service_account.Credentials.from_service_account_file("APIKey.json")
client = vision.ImageAnnotatorClient(credentials=credentials)
def OCRscan(self, imgfile):
print("Performing OCR Scan on the image ", imgfile)
with io.open(imgfile, "rb") as image_file:
content = image_file.read()
image = types.Image(content=content)
response_with_text = client.document_text_detection(image=image)
document = response_with_text.full_text_annotation
return document
def boxes_to_obj(self,bound):
return {'x1': bound.vertices[0].x ,'x2':bound.vertices[1].x ,
'y1':bound.vertices[0].y ,'y2':bound.vertices[2].y }
def generateTempFolder(self, prifx, src):
"Creating temp directory.."
print("Creating temp directory.. with src and prefix .. ", prifx, src)
# temp_dir = tempfile.mkdtemp(("-"+str(datetime.datetime.now()).replace(":", "-")), "PMR_Claims", self.cwd+os.sep
# + "GENERATED"+os.sep+"CLAIMS")
temp_dir = tempfile.mkdtemp(
("-"+str(datetime.datetime.now()).replace(":", "-")), prifx, src)
print("Temp directory created", temp_dir)
return temp_dir
def createSubDir(self, src, subDirNameList):
print("Creating a subdirectory..")
for subfolder_name in subDirNameList:
os.makedirs(os.path.join(src, subfolder_name))
def getFilesindir(self, dire):
print('Fetching the file in the directory')
print(dire)
return os.listdir(dire)
|
flexible
|
{
"blob_id": "be69a9981fe6b53c3b9c4d2893913e4f9f7efb26",
"index": 6697,
"step-1": "<mask token>\n\n\ndef boxes_to_obj(self, bound):\n return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':\n bound.vertices[0].y, 'y2': bound.vertices[2].y}\n\n\ndef generateTempFolder(self, prifx, src):\n \"\"\"Creating temp directory..\"\"\"\n print('Creating temp directory.. with src and prefix .. ', prifx, src)\n temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(\n ':', '-'), prifx, src)\n print('Temp directory created', temp_dir)\n return temp_dir\n\n\ndef createSubDir(self, src, subDirNameList):\n print('Creating a subdirectory..')\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef OCRscan(self, imgfile):\n print('Performing OCR Scan on the image ', imgfile)\n with io.open(imgfile, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n response_with_text = client.document_text_detection(image=image)\n document = response_with_text.full_text_annotation\n return document\n\n\ndef boxes_to_obj(self, bound):\n return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':\n bound.vertices[0].y, 'y2': bound.vertices[2].y}\n\n\ndef generateTempFolder(self, prifx, src):\n \"\"\"Creating temp directory..\"\"\"\n print('Creating temp directory.. with src and prefix .. ', prifx, src)\n temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(\n ':', '-'), prifx, src)\n print('Temp directory created', temp_dir)\n return temp_dir\n\n\ndef createSubDir(self, src, subDirNameList):\n print('Creating a subdirectory..')\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\ndef getFilesindir(self, dire):\n print('Fetching the file in the directory')\n print(dire)\n return os.listdir(dire)\n",
"step-3": "<mask token>\ncredentials = service_account.Credentials.from_service_account_file(\n 'APIKey.json')\nclient = vision.ImageAnnotatorClient(credentials=credentials)\n\n\ndef OCRscan(self, imgfile):\n print('Performing OCR Scan on the image ', imgfile)\n with io.open(imgfile, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n response_with_text = client.document_text_detection(image=image)\n document = response_with_text.full_text_annotation\n return document\n\n\ndef boxes_to_obj(self, bound):\n return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':\n bound.vertices[0].y, 'y2': bound.vertices[2].y}\n\n\ndef generateTempFolder(self, prifx, src):\n \"\"\"Creating temp directory..\"\"\"\n print('Creating temp directory.. with src and prefix .. ', prifx, src)\n temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(\n ':', '-'), prifx, src)\n print('Temp directory created', temp_dir)\n return temp_dir\n\n\ndef createSubDir(self, src, subDirNameList):\n print('Creating a subdirectory..')\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\ndef getFilesindir(self, dire):\n print('Fetching the file in the directory')\n print(dire)\n return os.listdir(dire)\n",
"step-4": "from google.cloud import vision\nfrom google.cloud.vision import types\nfrom google.oauth2 import service_account\nimport os\nimport io\nimport pdf2image\nimport tempfile\nimport datetime\ncredentials = service_account.Credentials.from_service_account_file(\n 'APIKey.json')\nclient = vision.ImageAnnotatorClient(credentials=credentials)\n\n\ndef OCRscan(self, imgfile):\n print('Performing OCR Scan on the image ', imgfile)\n with io.open(imgfile, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n response_with_text = client.document_text_detection(image=image)\n document = response_with_text.full_text_annotation\n return document\n\n\ndef boxes_to_obj(self, bound):\n return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':\n bound.vertices[0].y, 'y2': bound.vertices[2].y}\n\n\ndef generateTempFolder(self, prifx, src):\n \"\"\"Creating temp directory..\"\"\"\n print('Creating temp directory.. with src and prefix .. ', prifx, src)\n temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(\n ':', '-'), prifx, src)\n print('Temp directory created', temp_dir)\n return temp_dir\n\n\ndef createSubDir(self, src, subDirNameList):\n print('Creating a subdirectory..')\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\ndef getFilesindir(self, dire):\n print('Fetching the file in the directory')\n print(dire)\n return os.listdir(dire)\n",
"step-5": "from google.cloud import vision\nfrom google.cloud.vision import types\nfrom google.oauth2 import service_account\n\n\nimport os\n# import re\nimport io\n\nimport pdf2image\nimport tempfile\nimport datetime\n\n\n# Google API\ncredentials = service_account.Credentials.from_service_account_file(\"APIKey.json\")\nclient = vision.ImageAnnotatorClient(credentials=credentials)\n\n\ndef OCRscan(self, imgfile):\n\n print(\"Performing OCR Scan on the image \", imgfile)\n with io.open(imgfile, \"rb\") as image_file:\n content = image_file.read()\n\n image = types.Image(content=content)\n response_with_text = client.document_text_detection(image=image)\n document = response_with_text.full_text_annotation\n\n return document\n\n\ndef boxes_to_obj(self,bound):\n \n return {'x1': bound.vertices[0].x ,'x2':bound.vertices[1].x ,\n 'y1':bound.vertices[0].y ,'y2':bound.vertices[2].y }\n\n\ndef generateTempFolder(self, prifx, src):\n \"Creating temp directory..\"\n\n print(\"Creating temp directory.. with src and prefix .. \", prifx, src)\n # temp_dir = tempfile.mkdtemp((\"-\"+str(datetime.datetime.now()).replace(\":\", \"-\")), \"PMR_Claims\", self.cwd+os.sep\n # + \"GENERATED\"+os.sep+\"CLAIMS\")\n temp_dir = tempfile.mkdtemp(\n (\"-\"+str(datetime.datetime.now()).replace(\":\", \"-\")), prifx, src)\n\n print(\"Temp directory created\", temp_dir)\n\n return temp_dir\n\ndef createSubDir(self, src, subDirNameList):\n print(\"Creating a subdirectory..\")\n\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\ndef getFilesindir(self, dire):\n print('Fetching the file in the directory')\n print(dire)\n return os.listdir(dire)\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from flask import Flask, request, render_template
from random import choice, sample
app = Flask(__name__)
horoscopes = [
'your day will be awesome',
'your day will be terrific',
'your day will be fantastic',
'neato, you have a fantabulous day ahead',
'your day will be oh-so-not-meh',
'this day will be brilliant',
'looks like today is just ducky',
'I proclaim your day to be INCREDIBLE',
'this day will be wonderful',
'smash this day',
'this day shall be lovely',
'your day will be just satenacious']
@app.route('/')
def index():
"""Show the homepage and ask the user's name."""
return render_template('index.html')
@app.route('/horoscope')
def get_horoscope():
"""Give the user a horoscope"""
name = request.args.get('name')
num_horoscopes = int(request.args.get('num_horoscopes'))
show_horoscopes = request.args.get('show_horoscopes')
horoscopes_to_show = sample(horoscopes, num_horoscopes)
# predictions = ', '.join(sample(horoscopes, num_horoscopes))
return render_template(
'horoscopes.html',
name=name,
show_horoscopes=show_horoscopes,
        horoscopes_to_show=horoscopes_to_show)
"""
if show_horoscopes:
return f"Hello there, {name}: {predictions}."
else:
return f"Hello there, {name}! Have a nice day!"
"""
if __name__ == "__main__":
app.run(debug=True)
|
normal
|
{
"blob_id": "09d32b48ae88b1066dd0aa435a351c4fb1fc04ec",
"index": 9759,
"step-1": "from flask import Flask, request, render_template\nfrom random import choice, sample\n\napp = Flask(__name__)\n\nhoroscopes = [\n 'your day will be awesome',\n 'your day will be terrific',\n 'your day will be fantastic',\n 'neato, you have a fantabulous day ahead',\n 'your day will be oh-so-not-meh',\n 'this day will be brilliant',\n 'looks like today is just ducky',\n 'I proclaim your day to be INCREDIBLE',\n 'this day will be wonderful',\n 'smash this day',\n 'this day shall be lovely',\n 'your day will be just satenacious']\n\n\n@app.route('/')\ndef index():\n \"\"\"Show the homepage and ask the user's name.\"\"\"\n return render_template('index.html')\n\n\n@app.route('/horoscope')\ndef get_horoscope():\n \"\"\"Give the user a horoscope\"\"\"\n name = request.args.get('name')\n num_horoscopes = int(request.args.get('num_horoscopes'))\n show_horoscopes = request.args.get('show_horoscopes')\n horoscopes_to_show = sample(horoscopes, num_horoscopes)\n # predictions = ', '.join(sample(horoscopes, num_horoscopes))\n\n return render_template(\n 'horoscopes.html',\n name=name,\n show_horoscopes=show_horoscopes,\n horoscopes_to_show=horoscopes_to_show))\n\n\"\"\"\n if show_horoscopes:\n return f\"Hello there, {name}: {predictions}.\"\n else:\n return f\"Hello there, {name}! Have a nice day!\"\n\"\"\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from final import getMood
import pickle
def get_mood(username_t,username_i):
mapping={'sadness':'0,0,255','angry':'255,0,0','happy':'0,255,0','surprise':'139,69,19','neutral':'189,183,107','fear':'255,165,0'}
#Sad: Blue, Angry: Red, Happy: Green, Surprise: Brown, Neutral:Yellow,Fear:Orange
mood=getMood(username_i,username_t)
value=mood[0][0]
    print(value)
with open('colorfile', 'wb') as fp:
pickle.dump(mapping[value], fp)
return mood
|
normal
|
{
"blob_id": "aa4fd27382119e3b10d2b57c9b87deff32b5c1ab",
"index": 586,
"step-1": "from final import getMood\nimport pickle\ndef get_mood(username_t,username_i):\n mapping={'sadness':'0,0,255','angry':'255,0,0','happy':'0,255,0','surprise':'139,69,19','neutral':'189,183,107','fear':'255,165,0'}\n #Sad: Blue, Angry: Red, Happy: Green, Surprise: Brown, Neutral:Yellow,Fear:Orange\n \n mood=getMood(username_i,username_t)\n value=mood[0][0]\n print value\n with open('colorfile', 'wb') as fp:\n pickle.dump(mapping[value], fp)\n return mood\n \n\n \n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |