')\ndef carpark(code):\n carpark = Carpark.query.filter_by(code=code).first_or_404()\n has_data = code in Carpark.get_carparks_with_data()\n # If no analytics data, return template\n if not has_data:\n return render_template('carpark.html', carpark=carpark, has_data=has_data)\n\n data = {}\n\n # Statics\n statics_file = r'app/static/carparks/{}/plots/statics.html'.format(code)\n if not os.path.exists(statics_file):\n statics_df = carpark.get_statics()\n statics_df.to_html(statics_file, float_format='%.1f')\n data['statics'] = open(statics_file).read()\n\n # Volume\n vol_file = r'app/static/carparks/{}/plots/volume.png'.format(code)\n if not os.path.exists(vol_file):\n plt.clf()\n vol_df = carpark.get_volume()\n plt.style.use('ggplot')\n ax = vol_df.plot(kind='bar', figsize=(12, 5), stacked=True, x=vol_df.index)\n ticklabels = [''] * len(vol_df.index)\n ticklabels[::7] = [item.strftime('%b %d') for item in vol_df.index[::7]]\n ax.set_xlabel('Daily Volume')\n ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))\n plt.gcf().autofmt_xdate()\n plt.savefig(vol_file, format='png')\n\n # plot = figure(plot_width=1200, plot_height=500, x_axis_type=\"datetime\")\n # plot.line(vol_df.index, vol_df.values, color='navy', alpha=0.5)\n # output_file(vol_file)\n # save(plot)\n data['volume'] = url_for('static', filename='carparks/{}/plots/volume.png'.format(code))\n\n # Occupancy\n occ_file = r'app/static/carparks/{}/plots/occupancy.png'.format(code)\n if not os.path.exists(occ_file):\n plt.clf()\n occ_chart_data = carpark.get_occupancy()\n plt.style.use('ggplot')\n fig, ax = plt.subplots(figsize=(12, 5))\n occ_chart_data.plot(ax=ax, kind='bar', stacked=True)\n from matplotlib.ticker import PercentFormatter\n ax.yaxis.set_major_formatter(PercentFormatter())\n plt.savefig(occ_file, format='png')\n data['occupancy'] = url_for('static', filename='carparks/{}/plots/occupancy.png'.format(code))\n\n # LOS\n los_file = r'app/static/carparks/{}/plots/los.png'.format(code)\n if 
not os.path.exists(los_file):\n plt.clf()\n los_df = carpark.get_los()\n los_dist = los_df['los_group'].value_counts()\n los_dist.plot.pie(autopct='%.1f%%', figsize=(6, 6))\n plt.savefig(los_file, format='png')\n # To Be Polished\n data['los'] = url_for('static', filename='carparks/{}/plots/los.png'.format(code))\n\n\n\n\n return render_template('carpark.html', carpark=carpark, has_data=has_data, data=data)\n\n\n@main.route('/transactions/')\ndef transactions(code):\n carpark = Carpark.query.filter_by(code=code).first_or_404()\n page = request.args.get('page', 1, type=int)\n pagination = carpark.transactions.order_by(Transaction.exit_timestamp.desc()).paginate(\n page, per_page=current_app.config['TRANSACTIONS_PER_PAGE'], error_out=False\n )\n transactions = pagination.items\n return render_template('transactions.html', carpark=carpark, transactions=transactions, pagination=pagination)\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"226475709","text":"import sys\nimport pytest\n\nfrom jinja2 import Template\n\n\n@pytest.mark.skipif(sys.version_info < (3, 5),\n reason='Requires 3.5 or later')\ndef test_generator_stop():\n class X(object):\n def __getattr__(self, name):\n raise StopIteration()\n\n t = Template('a{{ bad.bar() }}b')\n with pytest.raises(RuntimeError):\n t.render(bad=X())\n","sub_path":"tests/test_features.py","file_name":"test_features.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"251370031","text":"import os\nimport subprocess\n\nimport discord\nfrom discord.ext import commands\n\nfrom utils.checks import checks\nfrom utils.functions import pagify\n\n\nclass Core(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(description=\"List all modules on the bot\")\n async def modules(self, ctx):\n cog_list, cogs_loaded, cogs_unloaded = [], \"```diff\\n+\\t\", \"\"\n event_list, events_loaded, events_unloaded = [], \"```diff\\n+\\t\", \"\"\n cogs, events = [], []\n bot_cogs = {}\n em = discord.Embed(color=self.bot.color)\n em.set_author(name=\"Bot modules:\")\n em.set_thumbnail(url=self.bot.user.avatar_url)\n paths = [\"modules/Commands\", \"modules/Events\"]\n for path in paths:\n for file in os.listdir(path):\n if not file.endswith(\".py\"):\n pass\n else:\n if path == paths[0]:\n cog_list.append(file[:-3])\n else:\n event_list.append(file[:-3])\n for name, obj in self.bot.cogs.items():\n if \"modules.Cogs\" in str(obj):\n cogs.append(name)\n else:\n events.append(name)\n bot_cogs[\"cogs\"] = cogs\n bot_cogs[\"events\"] = events\n for k, v in bot_cogs.items():\n if k == \"cogs\":\n for cog in v:\n if cog in cog_list:\n cog_list.remove(cog)\n else:\n for event in v:\n if event in event_list:\n event_list.remove(event)\n cogs_loaded += \", \".join(bot_cogs[\"cogs\"])\n cogs_unloaded += \", \".join(cog_list)\n events_loaded += \", \".join(bot_cogs[\"events\"])\n events_unloaded += \", \".join(event_list)\n cogs_loaded += f\"\\n-\\t{cogs_unloaded}```\" if cogs_unloaded else \"```\"\n events_loaded += f\"\\n-\\t{events_unloaded}```\" if events_unloaded else \"```\"\n em.add_field(name=\"Cogs:\", value=cogs_loaded)\n em.add_field(name=\"Events:\", value=events_loaded)\n await ctx.send(embed=em)\n\n @checks.is_owner()\n @commands.group(hidden=True, case_insensitive=True, description=\"Load a module\")\n async def load(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n if not 
ctx.invoked_subcommand:\n return await ctx.send_help(ctx.command)\n\n @load.command(name=\"cog\", aliases=[\"c\"], description=\"Load a cog\")\n async def load_cog(self, ctx, cog_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n cog_name = cog_name.replace(\".py\", \"\")\n try:\n self.bot.load_extension(f\"modules.Commands.{cog_name}\")\n except commands.ExtensionAlreadyLoaded:\n return await ctx.send_error(f\"Cog {cog_name} is already loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Cog {cog_name} could not be found!\")\n await ctx.send(f\"Cog {cog_name} has now been loaded!\")\n\n @load.command(name=\"event\", aliases=[\"e\"], description=\"Load an event\")\n async def load_event(self, ctx, event_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n event_name = event_name.replace(\".py\", \"\")\n try:\n self.bot.load_extension(f\"modules.Events.{event_name}\")\n except commands.ExtensionAlreadyLoaded:\n return await ctx.send_error(f\"Event {event_name} is already loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Event {event_name} could not be found!\")\n await ctx.send(f\"Event {event_name} has now been loaded!\")\n\n @checks.is_owner()\n @commands.group(hidden=True, case_insensitive=True, description=\"Unload a module\")\n async def unload(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n if not ctx.invoked_subcommand:\n return await ctx.send_help(ctx.command)\n\n @unload.command(name=\"cog\", aliases=[\"c\"], description=\"Unload a cog\")\n async def unload_cog(self, ctx, cog_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n cog_name = cog_name.replace(\".py\", \"\")\n try:\n self.bot.unload_extension(f\"modules.Cogs.{cog_name}\")\n except commands.ExtensionNotLoaded:\n return await ctx.send_error(f\"Cog {cog_name} is not loaded!\")\n except 
commands.ExtensionNotFound:\n return await ctx.send_error(f\"Cog {cog_name} could not be found!\")\n await ctx.send(f\"Cog {cog_name} is now unloaded!\")\n\n @unload.command(name=\"event\", aliases=[\"e\"], description=\"Unload an event\")\n async def unload_event(self, ctx, event_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n event_name = event_name.replace(\".py\", \"\")\n try:\n self.bot.unload_extension(f\"modules.Events.{event_name}\")\n except commands.ExtensionNotLoaded:\n return await ctx.send_error(f\"Event {event_name} is not loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Event {event_name} could not be found!\")\n await ctx.send(f\"Event {event_name} is now unloaded!\")\n\n @checks.is_owner()\n @commands.group(hidden=True, case_insensitive=True, description=\"Reload a module\")\n async def reload(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n if not ctx.invoked_subcommand:\n return await ctx.send_help(ctx.command)\n\n @reload.command(name=\"cog\", aliases=[\"c\"], description=\"Reload a cog\")\n async def reload_cog(self, ctx, cog_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n cog_name = cog_name.replace(\".py\", \"\")\n try:\n self.bot.reload_extension(f\"modules.Commands.{cog_name}\")\n except commands.ExtensionNotLoaded:\n return await ctx.send_error(f\"Cog {cog_name} is not loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Cog {cog_name} could not be found!\")\n await ctx.send(f\"Cog {cog_name} has been reloaded!\")\n\n @reload.command(name=\"event\", aliases=[\"e\"], description=\"Reload an event\")\n async def reload_event(self, ctx, event_name: str):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n event_name = event_name.replace(\".py\", \"\")\n try:\n self.bot.reload_extension(f\"modules.Events.{event_name}\")\n except 
commands.ExtensionNotLoaded:\n return await ctx.send_error(f\"Event {event_name} is not loaded!\")\n except commands.ExtensionNotFound:\n return await ctx.send_error(f\"Event {event_name} could not be found!\")\n await ctx.send(f\"Event {event_name} has been reloaded!\")\n\n @checks.is_owner()\n @commands.command(hidden=True, description=\"Pull updates from git\")\n async def pull(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n paged = pagify(\n subprocess.Popen(\n [\"git\", \"pull\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n ).stdout.read().decode()\n )\n for page in paged:\n p = f\"```css\\n{page}```\"\n await ctx.send(p)\n\n @checks.is_owner()\n @commands.command(name=\"raise\", hidden=True, description=\"Raise a test exception\")\n async def _raise(self, ctx):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n await ctx.send(\"Raising a test exception..\")\n raise Exception(f\"Exception raised by {ctx.author}\")\n\n @checks.is_owner()\n @commands.command(hidden=True, description=\"Force a user to run a command\")\n async def sudo(self, ctx, user: discord.Member, *, command):\n \"\"\"{\"permissions\": {\"user\": [\"bot_owner\"], \"bot\": []}}\"\"\"\n message = ctx.message\n prefix = await self.bot.get_prefix(message)\n message.author = user\n message.content = prefix + command\n await self.bot.invoke(await self.bot.get_context(message))\n\n\ndef setup(bot):\n bot.add_cog(Core(bot))\n","sub_path":"modules/Commands/Core.py","file_name":"Core.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"429526844","text":"#!/usr/bin/env python\nimport os\nfrom distutils.core import setup\n\ndef get_files(directory, install_base):\n file_list = []\n files=os.listdir(directory)\n found_files = []\n for file in files:\n if ( os.path.isdir(directory + \"/\" + file) ):\n if ( not file == \".svn\"):\n file_list += get_files(directory + \"/\" + file, install_base)\n else:\n found_files.append(directory + \"/\" + file)\n \n if ( len(found_files) > 0 ):\n file_list.append((install_base + \"/\" + directory, found_files))\n return file_list\n\nmedia_files = get_files(\"media\", \"share/saxs/\")\n\nsetup(\n name='saxs-style-glareindark',\n version='0.0.1',\n packages=['saxs_style_glareindark'],\n package_data={'saxs_style_glareindark': ['templates/*', 'templatetags/*']},\n data_files = media_files,\n)\n","sub_path":"saxs-style-glareindark/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"42429278","text":"import graphene\nfrom graphene import relay\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom graphene_django import DjangoObjectType\nfrom .models import Inmate\nfrom graphql_relay import from_global_id, to_global_id\n\n\nclass InmateNode(DjangoObjectType):\n\n class Meta:\n model = Inmate\n interfaces = (relay.Node,)\n filter_fields = {\n 'id': ['exact'],\n # 'number': ['exact'],\n 'last_name': ['exact', 'icontains', 'istartswith'],\n 'first_name': ['exact', 'icontains', 'istartswith'],\n 'middle_name': ['exact', 'icontains', 'istartswith'],\n 'agency': ['exact', 'icontains', 'istartswith'],\n 'date_created': ['exact', 'icontains', 'istartswith']\n }\n row_id = graphene.Int(source='id')\n # @staticmethod\n # def resolve_row_id(self):\n\n # return self.id\n\n\nclass Query(graphene.ObjectType):\n inmate = relay.Node.Field(InmateNode)\n inmates = DjangoFilterConnectionField(InmateNode)\n\n\nclass CreateInmate(graphene.relay.ClientIDMutation):\n\n class Input:\n # number = graphene.Int()\n last_name = graphene.String()\n first_name = graphene.String()\n middle_name = graphene.String()\n agency = graphene.String()\n\n inmate = graphene.Field(InmateNode)\n\n def mutate_and_get_payload(self, info, **input):\n # row_id = graphene.Int(source='id')\n inmate = Inmate(\n # number=row_id,\n last_name=input.get('last_name'),\n first_name=input.get('first_name'),\n middle_name=input.get('middle_name'),\n agency=input.get('agency')\n )\n inmate.save()\n return CreateInmate(inmate=inmate)\n\n\nclass Mutation(graphene.AbstractType):\n create_inmate = CreateInmate.Field()\n","sub_path":"jpacks/relay-schema.py","file_name":"relay-schema.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"150530404","text":"import config\nfrom discord.ext.commands import Bot\nimport asyncio\nimport re\nimport datetime\nimport time\n\nBOT_PREFIX = ('!')\nTOKEN = config.token\n\nclass Track:\n def __init__(self, title=None, length=None, user=None, np=False):\n self.title = title\n self.length = length\n self.user = user\n self.np = np\n\n def __str__(self):\n return '{0}\\\"{1}\\\" {2} ({3})'.format('▶ ' if self.np else ' ', self.title, sec2str(self.length), self.user)\n\nclient = Bot(command_prefix=BOT_PREFIX)\n\n#--------------------#\n\n@client.event\nasync def on_ready():\n print('Logged in as {0}'.format(client.user.name))\n #await client.send_message(destination=client.get_channel('356866879392055307'), content='harlo harlo')\n\n@client.command(name = 'harlo',\n pass_context = True)\nasync def cmd_harlo(context):\n msg = 'harlo harlo {0}!!!'.format(context.message.author.mention)\n await client.say(msg)\n\n#--------------------#\n\n@client.command(name = 'start',\n pass_context = True)\nasync def cmd_start(context):\n tracklist = await update_list_from_q(context)\n\n for i, track in enumerate(tracklist):\n print('{0}. 
{1}'.format(i, track))\n\ndef get_status_and_update_tl(tl, q_ts, sec_remaining):\n now_ts = datetime.datetime.utcnow()\n sec_since_q = (now_ts - q_ts).total_seconds()\n\n cur_q_pos = sum(t.length for t in tl) - sec_remaining + sec_since_q\n cur_pos = cur_q_pos\n cur_index = 0\n for i, t in enumerate(tl):\n if t.length - cur_pos <= 0:\n cur_pos -= t.length\n else:\n cur_index = i\n break\n\n #print('now_ts = {0}'.format(now_ts))\n #print('total_q_len = {0}'.format(sec2str(sum(t.length for t in tl))))\n #print('sec_since_q = {0}'.format(sec2str(sec_since_q)))\n #print('sec_remaining = {0}'.format(sec2str(sec_remaining)))\n #print('cur_q_pos = {0}'.format(sec2str(cur_q_pos)))\n print('rn: {0} {1}'.format(cur_index, sec2str(cur_pos)))\n return (cur_index, cur_pos)\n\nasync def update_list_from_q(context):\n q_ts = None\n sec_remaining = None\n tl = []\n regexes = {\n 'q_title': r'.*There (?:is|are) \\*\\*(\\d+)\\*\\* tracks? with a remaining length of \\*\\*\\[([\\d:]+)\\]\\*\\* in the queue\\..*',\n 'q_page': r'^Page \\*\\*(\\d+)\\*\\* of \\*\\*(\\d+)\\*\\*\\.$',\n 'q_track': r'^`\\[\\d+\\]` (\\\\▶)?\\*\\*(.*)\\*\\* added by \\*\\*(.*)\\*\\* `\\[([\\d:]+)\\]`$',\n }\n\n channel = context.message.channel\n\n # TODO: add after to limit msgs from last X hours\n found_q = False\n async for message in client.logs_from(channel):\n # Look for ;;q msg\n match = re.search(regexes['q_title'], message.content)\n if match:\n found_q = True\n q_ts = message.timestamp\n #q_ts = datetime.datetime(2018, 11, 12, 19, 23, 5, 605000)\n #q_ts = datetime.datetime.utcnow()\n sec_remaining = str2sec(match.group(2))\n\n # Scrape ;;q line-by-line for data\n page_cur = 0\n page_tot = 0\n for line in iter(message.content.splitlines()):\n match = re.search(regexes['q_page'], line)\n if match:\n page_cur = int(match.group(1))\n page_tot = int(match.group(2))\n match = re.search(regexes['q_track'], line)\n if match:\n length = match.group(4)\n \n track = Track(title = match.group(2),\n length = 
str2sec(match.group(4)),\n user = match.group(3),\n np = match.group(1))\n tl.append(track)\n\n print('\\nq_time: {0}\\n'\n 'page {1}/{2}, {3} tracks, {4} remaining\\n'.format(q_ts, page_cur, page_tot, len(tl), sec2str(sec_remaining)))\n\n get_status_and_update_tl(tl, q_ts, sec_remaining)\n break\n\n if not found_q:\n await client.say('No ;;q message found. Send a ;;q and try again')\n\n return tl\n\n#--------------------#\n\ndef utc2local(utc):\n epoch = time.mktime(utc.timetuple())\n offset = datetime.datetime.fromtimestamp(epoch) - datetime.datetime.utcfromtimestamp(epoch)\n return utc + offset\n\ndef str2sec(string):\n match = re.match(r'^\\d{2}:\\d{2}$', string)\n if match:\n t = time.strptime(string, '%M:%S')\n match = re.match(r'^\\d{2}:\\d{2}:\\d{2}$', string)\n if match:\n t = time.strptime(string, '%H:%M:%S')\n return datetime.timedelta(hours=t.tm_hour, minutes=t.tm_min, seconds=t.tm_sec).total_seconds()\n\ndef sec2str(sec):\n return str(datetime.timedelta(seconds=sec))\n\nif __name__ == '__main__':\n client.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"169483523","text":"import numpy as np # linear algebra\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Dense, Activation, Conv2D, Flatten, Conv1D\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.optimizers import Adam\nfrom trainer.replay_buffer import ReplayBuffer\nfrom trainer.dense_nn import DenseNN\nfrom kaggle_environments.envs.halite.helpers import Board, ShipAction, ShipyardAction, Observation\n\n\ndef build_conv_dqn(lr, n_actions, input_dims, fc1_dims):\n model = Sequential()\n model.add(Conv2D(filters=32, kernel_size=8, strides=4, activation='relu',\n input_shape=(input_dims,None), data_format='channels_first'))\n model.add(Conv2D(filters=64, kernel_size=4, strides=2, activation='relu',\n data_format='channels_first'))\n model.add(Conv2D(filters=64, kernel_size=3, strides=1, activation='relu',\n data_format='channels_first'))\n model.add(Flatten())\n model.add(Dense(fc1_dims, activation='relu'))\n model.add(Dense(n_actions))\n\n model.compile(optimizer=Adam(lr=lr), loss='mean_squared_error')\n\n return model\n\n\ndef build_dense_dqn(lr, n_actions, input_dims, fc1_dims, fc2_dims, fc3_dims=8):\n model = keras.Sequential(\n [keras.layers.Dense(fc1_dims, input_shape=(input_dims,)),\n keras.layers.Activation('relu'),\n keras.layers.Dense(fc2_dims),\n keras.layers.Activation('relu'),\n keras.layers.Dense(fc3_dims),\n keras.layers.Activation('relu'),\n keras.layers.Dense(n_actions)]\n )\n\n model.compile(optimizer=keras.optimizers.Adam(lr=lr), loss='mse')\n\n return model\n\n\nclass Agent(object):\n\n def __init__(self,\n alpha,\n gamma,\n n_actions,\n epsilon,\n batch_size,\n input_dims,\n epsilon_dec=0.985,\n epsilon_end=0.05,\n win_reward=5,\n replace=20,\n fc1_dims=16,\n fc2_dims=16,\n mem_size=100_000,\n fname='dqn_model.h5',\n verbose=False,\n agent_type='default',\n nnet_type='dense'\n ):\n \"\"\"\n gamma: discount factor\n epsilon: how often we choose the random action\n\n \"\"\"\n 
self.action_space = [i for i in range(n_actions)]\n self.n_actions = n_actions\n self.gamma = gamma\n self.epsilon = epsilon\n self.epsilon_end = epsilon_end\n self.epsilon_dec = epsilon_dec\n self.batch_size = batch_size\n self.model_file = fname\n self.win_reward = win_reward\n self.replace = replace\n self.learn_step = 0\n # self.state_converter = state_converter\n self.memory = ReplayBuffer(mem_size, input_dims, n_actions, discrete=True)\n self.verbose = verbose\n self.chose_random = False\n self.agent_type = agent_type\n\n if nnet_type == 'dense':\n dense_nn = DenseNN(\n n_actions=n_actions,\n unit_scale=2,\n observation_shape=input_dims\n )\n self.q_online = dense_nn.compile('q_online', 'mse')\n self.q_offline = dense_nn.compile('q_offline', 'mse')\n else:\n self.q_online = build_conv_dqn(alpha, fc1_dims=fc1_dims, input_dims=input_dims, n_actions=n_actions)\n self.q_offline = build_conv_dqn(alpha, fc1_dims=fc1_dims, input_dims=input_dims, n_actions=n_actions)\n\n def remember(self, state, action, reward, new_state, done):\n self.memory.store_transition(state, action, reward, new_state, done)\n return reward\n\n def replace_target_network(self):\n if self.replace is not None and self.learn_step % self.replace == 0:\n self.q_offline.set_weights(self.q_online.get_weights())\n\n def get_action(\n self,\n state,\n game,\n step,\n verbose=True\n ):\n \"\"\"\n Given a particular state, select the 2 highest values actions, or 2 random actions.\n\n :param state:\n :param game: Which game number this is (used for\n :return:\n \"\"\"\n\n rand = np.random.rand()\n if rand < self.epsilon:\n top_action_index = np.random.choice(self.action_space, 1)[0]\n # see here!\n actions = np.zeros(len(self.action_space))\n actions[top_action_index] = 1\n self.chose_random = True\n else:\n state = np.array([state])\n # pass the state through the network\n # and select the best action\n\n pred = self.q_online.predict(state)\n action_values = pred[0]\n\n top_action_index = 
np.argmax(action_values)\n actions = np.zeros(len(self.action_space))\n\n # try:\n actions[top_action_index] = 1\n\n self.chose_random = False\n\n if verbose and ((game % 10) == 0) and (step % 10 == 0):\n print(f'Game: {game}, Step: {step}')\n print('action values')\n print(action_values)\n print('top_action_index')\n print(top_action_index)\n print('actions')\n print(actions)\n\n return top_action_index\n\n def learn(self, step_num, episode_num):\n verbose = self.verbose\n\n # print(f'Learning with agent type: {self.agent_type}')\n\n verbose = verbose and (self.memory.mem_ctr == self.batch_size)\n\n # this is a temporal difference learning method --> we learn on each step\n # when we start, do we start with random or all zeros?\n if self.memory.mem_ctr < self.batch_size:\n return\n\n self.learn_step += 1\n\n self.replace_target_network()\n\n if verbose and ((episode_num % 10) == 0) and (step_num % 10 == 0):\n print('\\n ================')\n print('learning - game: {}, iteration: {}'.format(episode_num, step_num))\n print('Mean reward: {}'.format(np.mean(self.memory.reward_memory)))\n\n # Here we sample non-sequential memory. 
We don't want to sample sequential\n # memory because this results in correlation (23:45 in video)\n state, action, reward, new_state, done = \\\n self.memory.sample_buffer(self.batch_size)\n\n if verbose:\n print('states: {}, actions: {}, rewards: {}'.format(len(state), len(action), len(reward)))\n\n # feed set of states through the model\n\n if verbose and ((episode_num % 10) == 0) and (step_num % 10 == 0):\n print('Predicting q_target with input {}'.format(state.shape))\n q_network = self.q_offline.predict(state)\n if verbose:\n print('Predicting q_next with {}'.format(state.shape))\n q_next = self.q_offline.predict(new_state)\n\n q_network = q_network.copy()\n\n # this is a point of contention\n # TODO: return to this (22:00 in video)\n batch_index = np.arange(self.batch_size, dtype=np.int32)\n\n q_network[batch_index, action] = reward + \\\n self.gamma * np.max(q_next, axis=1) * (1 - done)\n\n if verbose and ((episode_num % 10) == 0) and (step_num % 10 == 0):\n print('Updated q_target with shape: {}'.format(q_network.shape))\n\n self.q_online.train_on_batch(state, q_network)\n\n if verbose and ((episode_num % 10) == 0) and (step_num % 10 == 0):\n print('Training complete')\n\n self.epsilon = self.epsilon * self.epsilon_dec if self.epsilon > self.epsilon_end \\\n else self.epsilon\n\n def save_weights(self, model_path):\n if not model_path:\n model_path = self.model_file\n self.q_online.save_weights(model_path)\n\n def load_weights(self, model_path):\n if not model_path:\n model_path = self.model_file\n self.q_online.load_weights(model_path)\n","sub_path":"code/v2/rl_on_gcp/trainer/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"5578522","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 14 20:17:58 2017\n\n@author: Karthikeya\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\n\n# Filenames as per tweet_postag.py\nst_train = [\"norm_trn_ath.csv\", \"norm_trn_cli.csv\", \"norm_trn_fem.csv\", \"norm_trn_hil.csv\", \"norm_trn_leg.csv\"]\nst_test = [\"norm_tst_ath.csv\", \"norm_tst_cli.csv\", \"norm_tst_fem.csv\", \"norm_tst_hil.csv\", \"norm_tst_leg.csv\"]\nacc_st = []\nfor i in range(5):\n trn_f = st_train[i]\n tst_f = st_test[i]\n \n train = pd.read_csv(trn_f, engine='python')\n test = pd.read_csv(tst_f, engine='python')\n \n train_features = train.iloc[:-1,:-1]\n train_target = train.iloc[:-1,-1]\n \n test_features = test.iloc[:-1,:-1]\n test_target = test.iloc[:-1,-1]\n \n clf = RandomForestClassifier()\n clf = clf.fit(train_features, train_target)\n \n pred_target = clf.predict(test_features)\n acc_st.append(accuracy_score(test_target, pred_target))\n print(accuracy_score(test_target, pred_target))","sub_path":"stance_random_forests.py","file_name":"stance_random_forests.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"590457472","text":"\"\"\"Wrapper code to test flatten functions.\r\n\"\"\"\r\nfrom compiler.ast import flatten\r\nimport random\r\nimport sys\r\nimport timeit\r\n\r\nfrom my_flatten import my_flatten\r\n\r\nrandom.seed(\"listoflists\")\r\n\r\nN_ITEMS = 10 ** 2\r\nTIMEIT_ITERATIONS = 10 ** 2\r\n\r\nrand_digit = lambda: random.randint(0,9)\r\ngo_deeper = lambda: random.choice([True, False])\r\n\r\n\r\ndef rand_list(curr_depth, max_len=5, max_depth=5):\r\n list_ = []\r\n for _ in range(random.choice(range(max_len + 1))):\r\n if go_deeper() and curr_depth < max_depth:\r\n list_.append(rand_list(curr_depth + 1))\r\n else:\r\n list_.append(rand_digit())\r\n return list_\r\n\r\n\r\ndef rand_list_of_lists(n_items):\r\n list_of_lists = []\r\n for _ in range(n_items):\r\n list_of_lists.append(rand_list(0))\r\n return list_of_lists\r\n\r\nif __name__ == \"__main__\":\r\n setup = \"\"\"\r\nfrom flatten_timeit import rand_list, rand_list_of_lists\r\nfrom my_flatten import my_flatten\r\n\r\nlol = rand_list_of_lists({0})\r\n\"\"\".format(N_ITEMS)\r\n stmt = \"my_flatten(lol)\"\r\n\r\n # check my_flatten first\r\n lol = rand_list_of_lists(N_ITEMS)\r\n assert flatten(lol) == my_flatten(lol), \"Your my_flatten did not flatten the list of lists properly.\"\r\n\r\n # time my_flatten second\r\n sys.stdout.write(\"Your my_flatten function took: {0} seconds to run {1} times.\\n\".format(\r\n timeit.timeit(stmt, setup=setup, number=TIMEIT_ITERATIONS), TIMEIT_ITERATIONS))\r\n","sub_path":"iterative/flatten_timeit.py","file_name":"flatten_timeit.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"549234868","text":"#!/usr/bin/env python\r\n# -*- coding: cp1251 -*-\r\n#\r\n# grep logs by max procedures timings, threads activities processed\r\n\r\n# return file system objects list\r\ndef getFsoList(fso_path):\r\n\r\n import os, time\r\n\r\n files = [os.path.join(fso_path, f) for f in os.listdir(fso_path)]\r\n\r\n # sort list by file time modified\r\n return sorted(files, key=lambda x: time.ctime(os.path.getmtime(x)))\r\n\r\n#\r\ndef miner(fso_list):\r\n\t\r\n import re\r\n from datetime import datetime\r\n from datetime import timedelta\r\n\r\n grape_di = {}\r\n result_li = []\r\n\r\n for fso_name in fso_list:\r\n f = open(fso_name, 'r')\r\n print (fso_name)\r\n for line in f:\r\n time_str = re.search(r'(^\\d{2}:\\d{2}:\\d{2},\\d{3})(.*\\.)(0x\\d{2}) - (В.*ходные) параметры вызова .*', line)\r\n if time_str:\r\n th_time = datetime.strptime(time_str.group(1), '%H:%M:%S,%f')\r\n th_num = int(time_str.group(3)[2:])\r\n th_queue = time_str.group(2)\r\n if time_str.group(4) == 'Входные':\r\n if th_num not in grape_di.keys():\r\n grape_di[th_num] = [[th_time, th_queue]]\r\n else:\r\n grape_di[th_num].append([th_time, th_queue])\r\n if time_str.group(4) == 'Выходные':\r\n if th_num in grape_di.keys():\r\n if grape_di[th_num] != []:\r\n for i in range(len(grape_di[th_num]),0,-1):\r\n if grape_di[th_num][i-1][1] == th_queue:\r\n last_time = grape_di[th_num][i-1][0]\r\n del grape_di[th_num][i-1]\r\n result_li.append(['{:%H:%M:%S}'.format(last_time), (th_time - last_time).seconds, th_num])\r\n break\r\n\r\n return sorted(result_li), max(grape_di.keys())\r\n\r\n\r\n# --select max(proc_timing) from (init_time, proc_time, th_num) group by init_time, th_num\r\ndef group_by(in_list):\r\n\r\n from itertools import groupby\r\n\r\n# for key, group in groupby(in_list, lambda x: x[0]):\r\n# print (key)\r\n# print (max(group, key=lambda x: x[1]))\r\n\r\n return [[max(group, key=lambda x: x[1])] for key, group in groupby(in_list, lambda x: x[0])]\r\n\r\n# convert to 
excel view\r\ndef to_excel(in_list, max_th):\r\n\r\n out_list = []\r\n\r\n for i in in_list:\r\n tmp = [i[0][0]] + [''] * (max_th+1)\r\n tmp[i[0][2]+1] = str(i[0][1])\r\n out_list.append(tmp)\r\n\r\n return out_list\r\n\r\n# main module\r\ndef main():\r\n\r\n# inpath = 'c:/project1/grep/test4'\r\n# inpath = 'c:/project1/grep/test2'\r\n inpath = 'c:/project1/grep/in'\r\n\r\n # get files list\r\n in_fso_list = getFsoList(inpath)\r\n\r\n a,m = miner(in_fso_list)\r\n# print ('\\nthis is extractor action!')\r\n# for i in a: print (i)\r\n\r\n b = group_by(a)\r\n# print ('\\nthis is group by action!')\r\n# for i in b: print (i)\r\n \r\n c = to_excel(b,m)\r\n print ('\\nthis is excel view!')\r\n for i in c: print (i)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # main module call\r\n main()\r\n","sub_path":"src/grepit4_3.py","file_name":"grepit4_3.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"635378945","text":"from Karumanchi.Queue import Queue\n\ndef rearrange(input_que):\n temp_que = Queue.Queue1()\n is_length_odd = True if input_que.size%2 ==1 else False\n mid = input_que.size//2\n for i in range(mid):\n temp_que.enqueue(input_que.dequeue())\n while not temp_que.is_empty():\n input_que.enqueue(temp_que.dequeue())\n input_que.enqueue(input_que.dequeue())\n if is_length_odd:\n input_que.enqueue(input_que.dequeue())\n return input_que\n\nif __name__==\"__main__\":\n que = Queue.Queue1()\n for i in range(11,22):\n que.enqueue(i)\n rearrange(que)\n while not que.is_empty():\n print(que.dequeue())","sub_path":"Karumanchi/Queue/Rearrange.py","file_name":"Rearrange.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"393804757","text":"from pathlib import Path\nimport pytest\nimport subprocess\n\nTEST_CASES = [\n # Skip modules tests\n # \"../day3/modules/solutions/exercise1/exercise1.py\",\n # \"../day3/modules/solutions/exercise2/__init__.py\",\n # \"../day3/modules/solutions/exercise2/mod1.py\",\n # \"../day3/modules/solutions/exercise2/mod2.py\",\n # \"../day3/modules/solutions/exercise2/mod3.py\",\n # Skip linting tests\n # \"../day3/linting/solutions/exercise1_pylint.py\",\n # \"../day3/linting/solutions/devices.py\",\n # \"../day3/linting/solutions/exercise1_pep8.py\",\n # \"../day3/linting/solutions/exercise2.py\",\n \"../day3/parsers/solutions/exercise2.py\",\n \"../day3/parsers/solutions/exercise3.py\",\n \"../day3/parsers/solutions/exercise4.py\",\n \"../day3/serialization/solutions/exercise1.py\",\n \"../day3/serialization/solutions/exercise2.py\",\n \"../day3/serialization/solutions/exercise3.py\",\n \"../day3/data_struct/solutions/exercise1.py\",\n \"../day3/api/solutions/exercise1.py\",\n \"../day3/api/solutions/exercise2.py\",\n \"../day3/nxapi/solutions/exercise1.py\",\n \"../day3/nxapi/solutions/exercise2.py\",\n \"../day3/recap/solutions/exercise1.py\",\n]\n\n\ndef subprocess_runner(cmd_list, exercise_dir):\n with subprocess.Popen(\n cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=exercise_dir\n ) as proc:\n std_out, std_err = proc.communicate()\n return (std_out.decode(), std_err.decode(), proc.returncode)\n\n\ndef test_parsers_ex1():\n \"\"\"\n Execute textfsm.py:\n\n $ textfsm.py exercise1.tpl ex1_show_int_status.txt\n \"\"\"\n cmd_list = [\"textfsm.py\", \"exercise1.tpl\", \"ex1_show_int_status.txt\"]\n script_dir = \"../day3/parsers/solutions/\"\n std_out, std_err, return_code = subprocess_runner(cmd_list, script_dir)\n assert return_code == 0\n assert std_err == \"\"\n\n\n@pytest.mark.parametrize(\"test_case\", TEST_CASES)\ndef test_runner(test_case):\n path_obj = Path(test_case)\n python_script = path_obj.name\n script_dir = 
path_obj.parents[0]\n cmd_list = [\"python\", python_script]\n std_out, std_err, return_code = subprocess_runner(cmd_list, script_dir)\n assert return_code == 0\n assert std_err == \"\"\n","sub_path":"tests/test_day3.py","file_name":"test_day3.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"236834479","text":"# -*- coding: utf-8 -*-\nimport re\nimport scrapy\nfrom bancochile_cl.items import BancochileClItem\nfrom datetime import datetime\nfrom fuzzywuzzy import process\nfrom itertools import zip_longest\n\n\nclass BancoChileSpider(scrapy.Spider):\n\n # Name of the spider\n name = 'banco_chile'\n allowed_domains = ['bancochile.cl']\n \n # The 6 links holding all the required benefits\n start_urls = [\n 'https://ww3.bancochile.cl/wps/wcm/connect/Personas/Portal/programa-travel/beneficios/vestuario-calzado/',\n \"https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/beneficios/salud-y-belleza/portada\",\n \"https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/beneficios/hogar/hogar\",\n \"https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/beneficios/servicios/portada\",\n 'https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/panoramas/restaurantes/portada',\n \"https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/panoramas/entretencion/portada\"\n ]\n # Iterates over all start urls\n def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url=url, callback=self.parse, meta={\"from_url\": url})\n\n # Creates absolute links for parse_page to crawl, extract and output\n # Gets the category name and passes it the parse_page as meta \n def parse(self, response):\n\n from_url = response.meta['from_url']\n base_url = 'https://ww3.bancochile.cl'\n\n # Restaurantes benefits has different html structure so the url must be checked first\n if from_url == 'https://ww3.bancochile.cl/wps/wcm/connect/personas/portal/programa-travel/panoramas/restaurantes/portada':\n links = response.xpath('//div[@class=\"content\"]/div/a/@href').extract()\n else:\n links = response.xpath('//div[@class=\"benef-cont\"]/a/@href').extract()\n\n # The category to be transfered as meta data to parse_page()\n categoria = 
response.xpath('//head/title/text()').extract_first()\n\n for i in links:\n # A URL from the bancochile website for buying tickets. Irrelevant to the other pages for scraping\n cine_url_to_avoid = \"/wps/wcm/connect/personas/portal/programa-travel/panoramas/entretencion/cines\"\n if i == cine_url_to_avoid:\n continue\n else:\n url = base_url + i\n yield scrapy.Request(url = url, callback=self.parse_page, meta={\"Categoria\": categoria})\n\n\n def parse_page(self, response):\n \n # TODO Logo for banco de chile\n pais = \"Chile\"\n\n # The logo of the benefit. Is a list since image_urls requires it being a list\n benefit_logo = ['https://ww3.bancochile.cl' + response.xpath('//div[@class=\"content\"]/div[@class=\"content-left\"]//@src').extract_first()]\n\n # Regex patterns to match available phone numbers. Since there are different possibilities of written numbers, \n # different patterns are implemented and compiled as one\n phone_regex = re.compile(r'(\\d{11})|(\\d{10})|(\\d{9})|(\\d{8})|(\\d\\s\\d{8})|(\\d{2}\\s\\d{7})|(\\+\\d{2}\\-\\d\\-\\s\\d{4}\\-\\d{4})|(\\d{3}\\s\\d{6})|(\\(\\d\\)\\d{8})|(\\d{5}\\s\\d{4})|(\\(\\+\\d{3}\\)\\s\\d{8})|(\\(\\+\\d{4}\\)\\s\\d{4}\\s\\d{3})|(\\(\\+\\d{4}\\)\\s\\d{3}\\s\\d{4})|(\\+\\d{3}\\s\\d{8})|(\\(\\+\\d{4}\\)\\s\\d{3}\\s\\d{3})|(\\+\\d{2}\\-\\d{2}\\-\\d{6})|(\\+\\d{2}\\-\\d{2}\\-\\d{7})|(\\+\\d{2}\\-\\d{2}\\-\\d{3}\\-\\d{2}\\-\\d{2})|(\\+\\d{2}\\-\\d{2}\\-\\d{3}\\-\\d{4})|(\\(\\+\\d{3}\\)\\s\\d{4}\\s\\d{4})|(\\+\\d{2}\\-\\d\\-\\d{4}\\-\\d{4})|(\\+\\d{2}\\-\\d{2}\\-\\d{3}\\-\\d{3})|(\\(\\d\\)\\d{8})|(\\d{5}\\s\\d{4})|(\\d{2}\\-\\d{3}\\-\\d{3})|(\\+\\d{2}\\-\\d{2}\\–\\d{6})|(\\d{3}\\s\\d{4}\\s\\d{4})|(\\+\\d{2}\\s\\d\\s\\d{4}\\s\\d{4})')\n \n # Create a dictionary with each month and its corresponding number \n # Will be used if a month is located anywhere in the \"Valid until\" text and substituted with its number so the date in \n # starting and ending columns is properly outputted\n month_dict = {\n 'enero': \"01\" , 'febrero': 
\"02\", 'marzo': \"03\", 'abril': \"04\", 'mayo': \"05\", 'junio': \"06\", \n 'julio': \"07\", 'agosto': \"08\", 'septiembre': \"09\", 'octubre': \"10\", 'noviembre': \"11\", 'diciembre': \"12\"}\n # Exctract the name of the benefit\n nombre_del_beneficio = response.xpath('//h3//text()').extract()\n\n # We get the name of the business from the url \n nombre_del_comercio = response.url.split('/')[-1]\n\n # Extract the retailer decription\n descripcion_del_comercio = response.xpath('//section[@class=\"section-grey benef-ficha\"]/a/text()').extract_first()\n if response.xpath('//div[@class=\"content-right\"]/div/div[@class=\"ConDescu\"]'):\n terminos_y_condiciones_del_beneficio = response.xpath('//div[@class=\"content-right\"]/div/div/ul/li//text()').extract()\n \n else:\n terminos_y_condiciones_del_beneficio = response.xpath('//div[@class=\"content-right\"]/ul/li//text()').extract()\n\n # Extracts the whole paragraph ( to search for a percentage sign - % ). If unavailable - assigns the terminos_y_condiciones to it\n if not response.xpath('//div[@class=\"content-right\"]/p//text()'):\n percentage_paragraph = terminos_y_condiciones_del_beneficio\n else:\n percentage_paragraph = response.xpath('//div[@class=\"content-right\"]/p//text()').extract()\n \n # Get the terminos y condiciones text \n # Also to search for % sign or email below in the iteration\n email = \"\"\n \n # Assign an empty string variable for the type of benefit\n tipo_de_beneficio = \"\"\n\n # Assign an empty string for the retail website URL \n web_comercio = \"\"\n # String searching is implemented below so the index of the % sign is needed if found\n index_of_percent = 0\n first_index = 0\n \n # Iterate over the name of the benefit, the paragraph and the unordered list to search for the % sign\n # If found in any of them, extract 2 previous indexes to get the whole number % - 20%, 50% etc.\n # Or if \"Dólares-Premio\" found in any of them it will be assigned as tipo_de_beneficio\n tipo_found = 
False\n for i,j,k in zip_longest(nombre_del_beneficio, percentage_paragraph, terminos_y_condiciones_del_beneficio):\n if i:\n if '%' in i:\n tipo_found = True\n index_of_percent = i.index('%')\n first_index = index_of_percent - 2\n tipo_de_beneficio = str(i[first_index: index_of_percent + 1]) + \" de descuento\"\n elif \"Dólares-Premio\" in i:\n tipo_found = True\n tipo_de_beneficio = nombre_del_beneficio\n if not tipo_found:\n if j:\n if '%' in j:\n tipo_found = True\n index_of_percent = j.index('%')\n first_index = index_of_percent - 2\n tipo_de_beneficio = str(j[first_index: index_of_percent + 1]) + \" de descuento\"\n elif \"Dólares-Premio\" in j:\n tipo_found = True\n tipo_de_beneficio = nombre_del_beneficio\n if not tipo_found:\n if k:\n if '%' in k:\n tipo_found = True\n index_of_percent = k.index('%')\n first_index = index_of_percent - 2\n tipo_de_beneficio = str(k[first_index: index_of_percent + 1]) + \" de descuento\"\n elif \"Dólares-Premio\" in k:\n tipo_found = True\n tipo_de_beneficio = nombre_del_beneficio\n\n # Iterate over all of the 3 for a website URL\n # Iterate over the name of the benefit, the paragraph and the unordered list to search for the website\n # If found in any - the string will be split so only the website is extracted\n # Assign it to its variable accordingly afterwards\n for i,j,k in zip_longest(nombre_del_beneficio, percentage_paragraph, terminos_y_condiciones_del_beneficio):\n if i:\n if 'www' in i:\n new_list = i.split()\n for string in new_list:\n if 'www' in string:\n web_comercio = string\n elif j:\n if 'www' in j:\n new_list = j.split()\n for string in new_list:\n if 'www' in string:\n web_comercio = string\n elif k:\n if 'www' in k:\n new_list = k.split()\n for string in new_list:\n if 'www' in string:\n web_comercio = string\n\n # Exctract the benefit description and remove trailing newlines and whitespaces from the string\n descripcion_del_beneficio = 
response.xpath('//div[@class=\"content-bottom\"]/p//text()').extract_first().strip()\n\n # Find where in descripcion_del_beneficio is the first '2020' or '2021' located. It is the only constant found in each benefit's description string\n # Split the descripcion_del_beneficio on '2020' or on '2021' and take the first part\n year = \"\"\n promocion_valida = \"\"\n if '2020' in descripcion_del_beneficio:\n promocion_valida = descripcion_del_beneficio.split('2020')[0]\n year = '2020'\n elif '2021'in descripcion_del_beneficio:\n promocion_valida = descripcion_del_beneficio.split('2021')[0]\n year = '2021'\n # Check for 'desde ' - if found - there is a starting date. So far there are only 2 possiblities listed on the website for the promocion_valida string:\n # Either it has 'desde ' or it has 'hasta ' so we search for either one of them\n # If 'desde' is found - split the string on it, take the second part and make a list out of it for iteration below\n if 'desde ' in promocion_valida:\n promocion_valida = promocion_valida.split('desde ')[1].split()\n \n # If 'hasta ' is found - split the string on it, take the second part and make a list out of it for iteration below\n elif 'hasta ' in promocion_valida:\n promocion_valida = promocion_valida.split('hasta ')[1].split()\n\n # Search for 'el', 'al', 'de' and 'del' keywords and if found - remove them so only the actual dates are left in the list( promocion_valida )\n for i in promocion_valida:\n if 'el' in promocion_valida:\n promocion_valida.remove('el')\n elif 'al' in promocion_valida:\n promocion_valida.remove('al')\n elif 'de' in promocion_valida:\n promocion_valida.remove('de')\n elif 'del' in promocion_valida:\n promocion_valida.remove('del')\n\n # Substitute each month found in promocion_valida list with its corresponding number\n for k, v in month_dict.items():\n if k in promocion_valida:\n subs = promocion_valida.index(k)\n promocion_valida[subs] = str(month_dict[k])\n\n # Assign fetcha_de_initio to today's date. 
If another starting date is found in the promocion valida list, this will be substituted.\n fecha_de_initio = datetime.today().strftime('%Y-%m-%d')\n fecha_de_termino = \"\"\n\n # Checks the length of promocion valida:\n # If the length == 4 then there is a month for fecha_de_initio and a month for fecha_de_termino\n # If the length == 3 then both fecha_de_initio and fecha_de_termino have the same month\n # If the length == 2, fecha_de_initio stays set to today's date and only fecha_de_termino is set according to the benefit's information\n if len(promocion_valida) == 2:\n fecha_de_termino = f'{year}-' + promocion_valida[1] + '-' + promocion_valida[0]\n elif len(promocion_valida) == 3:\n fecha_de_initio = f'{year}-' + promocion_valida[2] + '-' + promocion_valida[0]\n fecha_de_termino = f'{year}-' + promocion_valida[2] + '-' + promocion_valida[1]\n elif len(promocion_valida) == 4:\n fecha_de_initio = f'{year}-' + promocion_valida[1] + '-' + promocion_valida[0]\n fecha_de_termino = f'{year}-' + promocion_valida[3] + '-' + promocion_valida[2]\n\n # Extract the type of program\n programa_de_beneficios = response.xpath('//div[@class=\"navbar-header\"]//img/@title').extract_first()\n # Company of the Benefit Program\n empresa_del_programa = \"Banco de Chile\"\n\n # A nested dictionary holding all counties with their respective cities and regions in Chile\n chile_communas = {\n \"Arica and Parinacota\": {\n \"Arica\": [\"Camarones\", \"Arica\"],\n \"Parinacota\": [\"Putre\", \"General Lagos\"]\n },\n \"Tarapacá\": {\n \"Iquique\": [\"Iquique\", \"Alto Hospicio\"],\n \"Tamarugal\": [\"Pozo Almonte\", \"Pica\", \"Huara\", \"Colchane\", \"Camiña\"]\n },\n \"Antofagasta\": {\n \"Antofagasta\": [\"Taltal\", \"Sierra Gorda\", \"Mejillones\", \"Antofagasta\"],\n \"El Loa\": [\"San Pedro de Atacama\", \"Ollagüe\", \"Calama\"],\n \"Tocopilla\": [\"Tocopilla\", \"María Elena\"]\n },\n \"Atacama\": {\n \"Chañaral\": [\"Diego de Almagro\", \"Chañaral\"],\n \"Copiapó\": [\"Tierra 
Amarilla\", \"Copiapó\", \"Caldera\"],\n \"Huasco\": [\"Vallenar\", \"Huasco\", \"Freirina\", \"Alto del Carmen\"]\n },\n \"Coquimbo\": {\n \"Choapa\": [\"Salamanca\", \"Los Vilos\", \"Illapel\", \"Canela\"],\n \"Elqui\": [\"Vicuña\", \"Paiguano\", \"La Serena\", \"La Higuera\", \"Coquimbo\", \"Andacollo\"],\n \"Limarí\": [\"Río Hurtado\", \"Punitaqui\", \"Ovalle\", \"Monte Patria\", \"Combarbalá\"]\n },\n \"Valparaíso\": {\n \"Isla de Pascua\": [\"Isla de Pascua\"],\n \"Los Andes\": [\"San Esteban\", \"Rinconada\", \"Los Andes\", \"Calle Larga\"],\n \"Marga Marga\": [\"Villa Alemana\", \"Quilpué\", \"Limache\", \"Olmué\"],\n \"Petorca\": [\"Zapallar\", \"Petorca\", \"Papudo\", \"La Ligua\", \"Cabildo\"],\n \"Quillota\": [\"Quillota\", \"Nogales\", \"La Cruz\", \"La Calera\", \"Hijuelas\"],\n \"San Antonio\": [\"Santo Domingo\", \"San Antonio\", \"El Tabo\", \"El Quisco\", \"Cartagena\", \"Algarrobo\"],\n \"San Felipe\": [\"Santa María\", \"San Felipe\", \"Putaendo\", \"Panquehue\", \"Llaillay\", \"Catemu\"],\n \"Valparaíso\": [\"Viña del Mar\", \"Valparaíso\", \"Quintero\", \"Puchuncaví\", \"Concón\", \"Juan Fernández\", \"Casablanca\"]\n },\n \"Metropolitana\": {\n \"Chacabuco\": [\"Tiltil\", \"Lampa\", \"Colina\"],\n \"Cordillera\": [\"San José de Maipo\", \"Puente Alto\", \"Pirque\"],\n \"Maipo\": [\"San Bernardo\", \"Paine\", \"Calera de Tango\", \"Buin\"],\n \"Melipilla\": [\"San Pedro\", \"Melipilla\", \"María Pinto\", \"Curacaví\", \"Alhué\"],\n \"Santiago\": [\n \"Vitacura\", \"Santiago\", \"San Ramón\", \"San Miguel\", \"San Joaquín\", \"Renca\", \"Recoleta\", \"Quinta Normal\", \"Quilicura\", \"Pudahuel\",\n \"Providencia\", \"Peñalolén\", \"Pedro Aguirre Cerda\", \"Ñuñoa\", \"Maipú\", \"Macul\", \"Lo Prado\", \"Lo Espejo\", \"Lo Barnechea\", \"Las Condes\", \n \"La Reina\", \"La Pintana\", \"La Granja\", \"La Florida\", \"La Cisterna\", \"Independencia\", \"Huechuraba\", \"Estación Central\", \"El Bosque\", \"Conchalí\",\n \"Cerro Navia\", 
\"Cerrillos\"\n ],\n \"Talagante\":[\"Talagante\", \"Peñaflor\", \"Padre Hurtado\", \"Isla de Maipo\", \"El Monte\"]\n },\n \"O'Higgins\": {\n \"Cachapoal\": [\n \"San Vicente\", \"Requínoa\", \"Rengo\", \"Rancagua\", \"Quinta de Tilcoco\", \"Pichidegua\", \"Peumo\", \"Olivar\", \"Mostazal\", \n \"Malloa\", \"Machalí\", \"Las Cabras\", \"Graneros\", \"Doñihue\", \"Coltauco\", \"Coinco\", \"Codegua\"\n ],\n \"Cardenal Caro\": [\"Pichilemu\", \"Paredones\", \"Navidad\", \"Marchihue\", \"Litueche\", \"La Estrella\"],\n \"Colchagua\": [\"Santa Cruz\", \"San Fernando\", \"Pumanque\", \"Placilla\", \"Peralillo\", \"Palmilla\", \"Nancagua\", \"Lolol\", \"Chimbarongo\", \"Chépica\"]\n },\n \"Maule\": {\n \"Cauquenes\": [\"Pelluhue\", \"Chanco\", \"Cauquenes\"],\n \"Curicó\": [\"Vichuquén\", \"Teno\", \"Sagrada Familia\", \"Romeral\", \"Rauco\", \"Molina\", \"Licantén\", \"Hualañé\", \"Curicó\"],\n \"Linares\": [\"Yerbas Buenas\", \"Villa Alegre\", \"San Javier\", \"Retiro\", \"Parral\", \"Longaví\", \"Linares\", \"Colbún\"],\n \"Talca\": [\"Talca\", \"San Rafael\", \"San Clemente\", \"Río Claro\", \"Pencahue\", \"Pelarco\", \"Maule\", \"Empedrado\", \"Curepto\", \"Constitución\"]\n },\n \"Ñuble\": {\n \"Diguillín\": [\"Chillán Viejo\", \"Chillán\", \"Bulnes\", \"El Carmen\", \"Pemuco\", \"Pinto\", \"Quillón\", \"San Ignacio\", \"Yungay\"],\n \"Itata\": [\"Cobquecura\", \"Coelemu\", \"Ninhue\", \"Portezuelo\", \"Quirihue\", \"Ránquil\", \"Treguaco\"],\n \"Punilla\": [\"Coihueco\", \"Ñiquén\", \"San Carlos\", \"San Fabián\", \"San Nicolás\"]\n },\n \"Biobío\": {\n \"Arauco\": [\"Tirúa\", \"Los Álamos\", \"Lebu\", \"Curanilahue\", \"Contulmo\", \"Cañete\", \"Arauco\"],\n \"Biobío\": [\n \"Yumbel\", \"Tucapel\", \"Santa Bárbara\", \"San Rosendo\", \"Quilleco\", \"Quilaco\", \"Negrete\", \n \"Nacimiento\", \"Mulchén\", \"Los Ángeles\", \"Laja\", \"Cabrero\", \"Antuco\", \"Alto Biobío\"\n ],\n \"Concepción\": [\n \"Tomé\", \"Talcahuano\", \"Santa Juana\", \"San Pedro de la 
Paz\", \"Penco\", \"Lota\", \n \"Hualqui\", \"Hualpén\", \"Florida\", \"Coronel\", \"Concepción\", \"Chiguayante\"\n ]\n },\n \"Araucanía\": {\n \"Cautín\": [\n \"Villarrica\", \"Vilcún\", \"Toltén\", \"Teodoro Schmidt\", \"Temuco\", \"Saavedra\", \"Pucón\", \n \"Pitrufquén\", \"Perquenco\", \"Padre Las Casas\", \"Nueva Imperial\", \"Melipeuco\", \"Loncoche\", \"Lautaro\",\n \"Gorbea\", \"Galvarino\", \"Freire\", \"Curarrehue\", \"Cunco\", \"Cholchol\", \"Carahue\"\n ],\n \"Malleco\": [\"Victoria\", \"Traiguén\", \"Renaico\", \"Purén\", \"Lumaco\", \"Los Sauces\", \"Lonquimay\", \"Ercilla\", \"Curacautín\", \"Collipulli\", \"Angol\"],\n },\n \"Los Ríos\": {\n \"Ranco\": [\"Río Bueno\", \"Lago Ranco\", \"La Unión\", \"Futrono\"],\n \"Valdivia\": [\"Valdivia\", \"Panguipulli\", \"Paillaco\", \"Mariquina\", \"Máfil\", \"Los Lagos\", \"Lanco\", \"Corral\"]\n },\n \"Los Lagos\": {\n \"Chiloé\": [\"Quinchao\", \"Quemchi\", \"Quellón\", \"Queilén\", \"Puqueldón\", \"Dalcahue\", \"Curaco de Vélez\", \"Chonchi\", \"Castro\", \"Ancud\"],\n \"Llanquihue\": [\"Puerto Varas\", \"Puerto Montt\", \"Maullín\", \"Los Muermos\", \"Llanquihue\", \"Frutillar\", \"Fresia\", \"Cochamó\", \"Calbuco\"],\n \"Osorno\": [\"San Pablo\", \"San Juan de la Costa\", \"Río Negro\", \"Puyehue\", \"Purranque\", \"Puerto Octay\", \"Osorno\"],\n \"Palena\": [\"Palena\", \"Hualaihué\", \"Futaleufú\", \"Chaitén\"]\n },\n \"Aysén\": {\n \"Aysén\": [\"Guaitecas\", \"Cisnes\", \"Aysén\"],\n \"Capitán Prat\": [\"Tortel\", \"O'Higgins\", \"Cochrane\"],\n \"Coyhaique\": [\"Lago Verde\", \"Coihaique\"],\n \"General Carrera\": [\"Río Ibáñez\", \"Chile Chico\"]\n },\n \"Magallanes\": {\n \"Antártica Chilena\": [\"Cabo de Hornos\", \"Antártica\"],\n \"Magallanes\": [\"San Gregorio\", \"Río Verde\", \"Punta Arenas\", \"Laguna Blanca\"],\n \"Tierra del Fuego\": [\"Timaukel\", \"Primavera\", \"Porvenir\"],\n \"Última Esperanza\": [\"Torres del Paine\", \"Natales\"]\n }\n\n }\n\n all_cities = []\n # Gets all cities 
in a list for lighter search\n for d1, d2 in chile_communas.items():\n for k, v in d2.items():\n all_cities.append(k)\n\n # Gets the html directions box\n direction_box = \"\"\n if response.xpath('//div[@class=\"contBlokAcordeon\"]'):\n direction_box = response.xpath('//div[@class=\"contBlokAcordeon\"]')\n\n # Checks how many \"Direcciones\" are listed so one can iterate over them - usually one or two ( ex. Santiago, Regiones )\n # Gets the direction name to run a light city search instead of full - i.e. if \"Santiago\" --> get it's counties and only iterate over them\n first_direction_name = \"\"\n first_direction_box = \"\"\n second_direction_name = \"\"\n second_direction_box = \"\"\n if direction_box:\n \n if len(direction_box) == 1:\n first_direction_box = direction_box[0].xpath('.//ul/li')\n first_direction_name = direction_box[0].xpath('./div[@class=\"ContTitulo\"]/h2/text()').extract_first() # Get the first name ( ex Santiago )\n elif len(direction_box) == 2:\n first_direction_box = direction_box[0].xpath('.//ul/li')\n first_direction_name = direction_box[0].xpath('./div[@class=\"ContTitulo\"]/h2/text()').extract_first()\n second_direction_box = direction_box[1].xpath('.//ul/li')\n second_direction_name = direction_box[1].xpath('./div[@class=\"ContTitulo\"]/h2/text()').extract_first() # Get the second name (ex. 
Regiones )\n\n # Assign an empty value for a city\n city_found = False\n # A list to check how many cities match the search below\n total_cities_found = []\n # A list to hold the addresses\n address_list = []\n # A dictionary to hold the counties if a city is matched\n my_dict = {}\n \n # Checks if \"Direcciones\" exists on the page\n if first_direction_name:\n\n # Assigns addresses and strips them of empty spaces\n for i in first_direction_box:\n address = i.xpath('.//text()').extract()\n address = [j.strip() for j in address]\n address_list.append(address)\n\n # Flattens the nested lists and concatenate the strings in the addresses lists so the phone numbers correspond to their listed address\n joined_list = [' '.join(x) for x in address_list]\n\n # A list of words for string matching\n phone_words = [\"Teléfono:\" , \"Reservas al fono\", \"Teléfono\", \"Reservas:\", \"Reservas al\", \"Reservas\", \"Tel\"]\n\n # Creates a dictionary to add the address with it's corresponding number if available\n address_phone_dict = {}\n\n # Used below to check if a city is matched\n city_match = \"\"\n\n # Checks for a phone-related word and website - if found - gets the phone number and/ or website name\n for word in phone_words:\n for address in joined_list:\n\n # Checks for a website\n if 'www' in address:\n web_comercio = address\n joined_list.remove(address)\n else:\n if word in address:\n address_only = address.split(word)[0]\n phone = address.split(word)[1]\n joined_list.remove(address)\n cleaned_phone_number = phone.replace(\"(\", \"\").replace(\")\", \"\").replace(\"+\", \"\").replace(\"-\",\"\").replace(\":\", \"\")\n if address_only not in address_phone_dict.keys():\n address_phone_dict[address_only] = cleaned_phone_number\n \n # Checks for a phone regex match if the previous for loop didn't missed a match\n for address in joined_list:\n # Searches through all the addresses\n phone_number = \"\"\n\n # Search for a matching regex pattern for a phone number \n # If a 
matching regex is found - assigns it as the value to the address key in the address_phone_dict\n # Checks to see if the \"Local\" keyword exists right before the found number to avoid writing part of the address\n # i.e. Local A200-A202-A204\n # (? with the features names\n '''\n if verbose:\n print('Fetching {:}...'.format(corpus_exception_file))\n \n corpus_exceptions_path = '{:}{:}'.format(CORPUS_EXCEPTIONS_DIR, corpus_exception_file)\n df = pd.read_csv(corpus_exceptions_path, sep='\\t')\n if verbose:\n print('Fetching {:}...done'.format(corpus_exception_file)) \n\n return set(df['TOKEN'].values)\n\n\n\ndef preprocess(lexicon, word2vec, verbose=True): \n '''\n 1. for NER entities within exception file\n replace by the tag organization, person, location\n 2. for smaller than 5 tokens replace by one hot encoding \n 3. include time i.e 20h30, 9h in number embeddings '0'\n 4. include ordinals 2º 2ª in number embeddings '0'\n 5. include tel._38-4048 in numeber embeddings '0'\n\n New Word embedding size = embedding size + one-hot enconding of 2 \n '''\n # define outputs\n total_words = len(lexicon)\n lexicon2token = dict(zip(lexicon, ['unk']*total_words))\n\n # fetch exceptions list\n pers = fetch_corpus_exceptions('corpus-word-missing-pers.txt', verbose=verbose)\n locs = fetch_corpus_exceptions('corpus-word-missing-locs.txt', verbose=verbose)\n orgs = fetch_corpus_exceptions('corpus-word-missing-orgs.txt', verbose=verbose)\n\n\n #define regex\n re_punctuation = re.compile(r'[{:}]'.format(string.punctuation), re.UNICODE)\n re_number = re.compile(r'^\\d+$')\n re_tel = re.compile(r'^tel\\._')\n re_time = re.compile(r'^\\d{1,2}h\\d{0,2}$')\n re_ordinals = re.compile(r'º|ª')\n\n for word in list(lexicon):\n # some hiffenized words belong to embeddings\n # ex: super-homem, fim-de-semana, pré-qualificar, caça-níqueis\n token = word.lower() \n if token in word2vec: \n lexicon2token[word]= token\n else:\n # if word in ['Rede_Globo', 'Hong_Kong', 'Banco_Central']:\n token = 
re_tel.sub('', token)\n token = re_ordinals.sub('', token)\n token = re_punctuation.sub('', token)\n\n token = re_time.sub('0', token)\n token = re_number.sub('0', token)\n\n if token in word2vec:\n lexicon2token[word]= token.lower()\n else:\n if word in pers:\n lexicon2token[word] = 'pessoa'\n else:\n if word in orgs:\n lexicon2token[word] = 'organização'\n else:\n if word in locs:\n lexicon2token[word] = 'local'\n\n total_tokens = len([val for val in lexicon2token.values() if not val in ('unk')])\n if verbose:\n print('Preprocess finished. Found {:} of {:} words, missing {:.2f}%'.format(total_tokens,\n total_words, 100*float(total_words-total_tokens)/ total_words)) \n\n return lexicon2token\n\ndef get_index(columns_list, columns_dims_dict, column_name):\n '''\n Returns column index from descriptor\n args:\n columns_list .: list input features + target\n columns_dims_dict .: dict holding the columns\n column_name .: str name of the column to get the index from\n\n returns:\n '''\n features_set = set(config.CATEGORICAL_FEATURES).union(config.EMBEDDED_FEATURES)\n used_set = set(columns_list)\n descriptor_list = sorted(list(features_set - used_set))\n index = 0\n for descriptor in descriptor_list:\n if descriptor == column_name:\n break\n else:\n index += columns_dims_dict[descriptor]\n return index\n\n\ndef get_dims(labels_list, labels_dim_dict):\n return sum([labels_dim_dict[label] for label in labels_list])\n\n\ndef get_binary(ds_type, embeddings, version='1.0'):\n if ds_type not in ('train', 'test', 'valid', 'deep'):\n raise ValueError('Invalid dataset label {:}'.format(ds_type))\n\n prefix = '' if ds_type in ('deep') else 'db'\n ext = 'pickle' if ds_type in ('deep') else 'tfrecords'\n dbname = '{:}{:}_{:}.{:}'.format(prefix, ds_type, embeddings, ext)\n return '{:}{:}/{:}'.format(INPUT_DIR, version, dbname)\n\n\ndef get_db_bounds(ds_type, version='1.0'):\n '''Returns upper and lower bound proposition for dataset\n\n Dataset breakdowns are done by partioning of 
the propositions\n\n Arguments:\n ds_type {str} -- Dataset type this must be `train`, `valid`, `test`\n\n Retuns:\n bounds {tuple({int}, {int})} -- Tuple with lower and upper proposition\n for ds_type\n\n Raises:\n ValueError -- [description]\n '''\n ds_tuple = ('train', 'valid', 'test',)\n version_tuple = ('1.0', '1.1',)\n\n if not(ds_type in ds_tuple):\n _msg = 'ds_type must be in {:} got \\'{:}\\''\n _msg = _msg.format(ds_tuple, ds_type)\n raise ValueError(_msg)\n\n if not(version in version_tuple):\n _msg = 'version must be in {:} got \\'{:}\\''\n _msg = _msg.format(version_tuple, version)\n raise ValueError(_msg)\n else:\n size_dict = config.DATASET_PROPOSITION_DICT[version]\n\n lb = 1\n ub = size_dict['train']\n\n if ds_type == 'train':\n return lb, ub\n else:\n lb += ub\n ub += size_dict['valid']\n if ds_type == 'valid':\n return lb, ub\n elif ds_type == 'test':\n lb += ub\n ub += size_dict['test']\n return lb, ub","sub_path":"models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"267013272","text":"import sys\nsys.stdin = open('input.txt','r')\nT = int(input())\n\n\ndef Aton(inputlist):\n alist = [\"ZRO\", \"ONE\", \"TWO\", \"THR\", \"FOR\", \"FIV\", \"SIX\", \"SVN\", \"EGT\", \"NIN\"]\n result = []\n for j in inputlist:\n for i, v in enumerate(alist):\n if j == v:\n result.append(i)\n return result\n\ndef NtoA(inputlist):\n alist = [\"ZRO\", \"ONE\", \"TWO\", \"THR\", \"FOR\", \"FIV\", \"SIX\", \"SVN\", \"EGT\", \"NIN\"]\n result = []\n for j in inputlist:\n for i, v in enumerate(alist):\n if j == i:\n result.append(alist[i])\n return result\n\nfor tc in range(1,T+1):\n n = list(input().split())[1]\n\n\n a = list(input().split())\n a = sorted(Aton(a))\n b = NtoA(a)\n print(f'#{tc}')\n for i in b:\n print(i, end=\" \")","sub_path":"190220/aton/이중봉.py","file_name":"이중봉.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"636855074","text":"import Histogram\nimport HistogramMain\n\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nimport signal\nfrom scipy import stats\nimport sys\nimport operator\n\ndef process_args():\n # Required arguments - input file, D T L costs\n parser = argparse.ArgumentParser(\"\")\n parser.add_argument(\"--input\", metavar=\"\", required=True,\n help=\"The path to a folder of .newick files.\")\n parser.add_argument(\"-d\", type=int, metavar=\"\", required=True,\n help=\"The relative cost of a duplication.\")\n parser.add_argument(\"-t\", type=int, metavar=\"\", required=True,\n help=\"The relative cost of a transfer.\")\n parser.add_argument(\"-l\", type=int, metavar=\"\", required=True,\n help=\"The relative cost of a loss.\")\n parser.add_argument(\"--timeout\", type=int, metavar=\"\", required=False, default=300,\n help=\"The amount of time a single tree file can run before timing out.\")\n parser.add_argument(\"--min-mprs\", type=int, metavar=\"<#MPRs>\", required=False, default=10000,\n help=\"The minimum number of MPRs a reconciliation must have to use it.\")\n args = parser.parse_args()\n return args\n\nclass TimeoutError(Exception):\n pass\n\ndef timeout_handler(signum, frame):\n raise TimeoutError\n\ndef sample_hist(hist, n):\n s_v = sum(list(hist.values()))\n # Do not sample if the histogram is smaller than the desired sample size\n if s_v < n:\n # The population for each key\n key_population = [[k]*v for k,v in hist.items()]\n # Flatten that list\n r = reduce(operator.concat, key_population)\n return r\n else:\n k = list(hist.keys())\n v = list(hist.values())\n # Convert v to a probability distribution\n p_v = [float(i)/s_v for i in v]\n # would use random.choices in 3.6\n return np.random.choice(k, n, p=p_v)\n\ndef hierarchical_cluster(hists):\n pass\n\n#TODO: consider smoothing\n# Although smoothing will remove even/odd parity differences.\ndef hist_to_array(hist):\n n = max(list(hist.keys()))\n l = []\n for i in 
range(n):\n if i in hist:\n l.append(hist[i])\n else:\n l.append(0)\n return np.array(l)\n\n# Shift-invariant Jensen-Shannon distance\ndef array_dist(a1, a2):\n # Begin by aligning the arrays via correlation\n c = np.correlate(a1, a2, mode='same')\n m = np.argmax(c)[0]\n shift = np.array([0] * m)\n l1 = len(a1)\n l2 = len(a2)\n if l1 < l2:\n new_a1 = np.concatenate(shift, a1)\n new_a2 = a2\n else:\n new_a1 = a1\n new_a2 = np.concatenate(shift, a2)\n return scipy.spatial.distance.jensenshannon(new_a1, new_a2)\n\ndef find_hists(pathstr, d, t, l, timeout=10, min_mprs=0, normalize=False, zero_loss=False):\n p = Path(pathstr)\n all_files = [f for f in p.glob(\"**/*\") if f.is_file()]\n tree_files = [f for f in all_files if f.suffix == \".newick\"]\n filenames = []\n histograms = []\n times = []\n for (i, f) in enumerate(tree_files):\n sys.stdout.write(\"{}/{}\\r\".format(i, len(tree_files)))\n sys.stdout.flush()\n # Time out if it's taking too long to calculate the histogram\n signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(timeout)\n try:\n hist, time = HistogramMain.calc_histogram(str(f), d,t,l, True, normalize, zero_loss)\n except TimeoutError:\n print(\"\")\n print(\"{} timed out\".format(f))\n continue\n except AssertionError:\n print(\"\")\n print(\"{} asserted\".format(f))\n continue\n signal.alarm(0)\n h_d = hist.histogram_dict\n s_v = sum(list(h_d.values()))\n # 20 is the minimum sample size for statistical testing to make sense\n # Also make sure it has above the minimum number of MPRs\n if s_v >= 20 and h_d[0] > min_mprs:\n filenames.append(f)\n histograms.append(hist)\n times.append(time)\n print(\"\")\n return filenames, histograms, times\n\ndef normality(hist_sample):\n # p is 1-probability of rejecting null hypothesis\n # So if p < 0.05 we can say with 95% confidence that the sample is not normal\n _,p = stats.normaltest(hist_sample)\n return p\n\ndef normal_sort(names, hists):\n samples = [sample_hist(h, 10000) for h in hists]\n normalities 
= [normality(s) for s in samples]\n z = zip(names, hists, samples, normalities)\n z = sorted(z, key=lambda x: x[3])\n return z\n\nif __name__ == \"__main__\":\n args = process_args()\n names, hists, times = find_hists(args.input, args.d, args.t, args.l, timeout=args.timeout, min_mprs=10000, normalize=True, zero_loss=True)\n print(\"DATA\")\n # Compute the timing information\n print(\"Timing:\")\n for i in range(len(names)):\n print(str(names[i]), times[i])\n time_mean = np.mean(times)\n time_std = np.std(times)\n time_max = np.max(times)\n print(\"Time:\")\n print(\"Mean: {}\".format(time_mean))\n print(\"Standard Deviation: {}\".format(time_std))\n print(\"Maximum: {}\".format(time_max))\n # Find the mean and standard deviation of the histograms\n all_hists = Histogram.Histogram.sum(hists)\n m = all_hists.mean()\n s = all_hists.standard_deviation()\n print(\"Distance:\")\n print(\"Mean: {}\".format(m))\n print(\"Standard Deviation: {}\".format(s))\n # Sort them by normality\n hist_ds = [h.histogram_dict for h in hists]\n l = normal_sort(names, hist_ds)\n print(\"Normality:\")\n for i in l:\n print(str(i[0]), i[3])\n \n","sub_path":"HistogramNormal.py","file_name":"HistogramNormal.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"593591701","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 11 10:08:26 2018\n\n@author: monish.mukherjee\n\"\"\"\nimport matplotlib.pyplot as plt\nimport time\nimport helics as h\nimport logging\nimport pandas as pd\n\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.DEBUG)\n\n\ndef destroy_federate(fed):\n h.helicsFederateFinalize(fed)\n h.helicsFederateFree(fed)\n h.helicsCloseLibrary()\n\n\nif __name__ == \"__main__\":\n\n ################################# Registering federate from json ########################################\n\n fed = h.helicsCreateCombinationFederateFromConfig(\"Control.json\")\n federate_name = h.helicsFederateGetName(fed)\n print(federate_name)\n endpoint_count = h.helicsFederateGetEndpointCount(fed)\n subkeys_count = h.helicsFederateGetInputCount(fed)\n print(subkeys_count)\n print(endpoint_count)\n ###################### Reference to Publications and Subscription form index #############################\n endid = {}\n subid = {}\n for i in range(0, endpoint_count):\n endid[\"m{}\".format(i)] = h.helicsFederateGetEndpointByIndex(fed, i)\n end_name = h.helicsEndpointGetName(endid[\"m{}\".format(i)])\n logger.info(\"Registered Endpoint ---> {}\".format(end_name))\n\n for i in range(0, subkeys_count):\n subid[\"m{}\".format(i)] = h.helicsFederateGetInputByIndex(fed, i)\n status = h.helicsInputSetDefaultComplex(subid[\"m{}\".format(i)], 0, 0)\n sub_key = h.helicsSubscriptionGetKey(subid[\"m{}\".format(i)])\n logger.info(\"Registered Subscription ---> {}\".format(sub_key))\n\n print(\n \"###############################################################################################\"\n )\n print(\n \"######################## Entering Execution Mode ##########################################\"\n )\n ###################### Entering Execution Mode ##########################################################\n h.helicsFederateEnterExecutingMode(fed)\n\n hours = 24\n 
total_inteval = int(60 * 60 * hours)\n grantedtime = -1\n update_interval = 5 * 60\n feeder_limit_upper = 4 * (1000 * 1000)\n feeder_limit_lower = 2.7 * (1000 * 1000)\n k = 0\n data = {}\n time_sim = []\n feeder_real_power = []\n feeder_imag_power = []\n for t in range(0, total_inteval, update_interval):\n\n while grantedtime < t:\n grantedtime = h.helicsFederateRequestTime(fed, t)\n time.sleep(0.1)\n\n time_sim.append(t / 3600)\n ############################# Subscribing to Feeder Load from to GridLAB-D ##############################################\n key = []\n Real_demand = []\n Imag_demand = []\n for i in range(0, subkeys_count):\n sub = subid[\"m{}\".format(i)]\n rload, iload = h.helicsInputGetComplex(sub)\n sub_key = h.helicsSubscriptionGetKey(sub)\n print(sub_key)\n if \"totalLoad\" in str(sub_key):\n key_feeder_load = sub_key\n distribution_fed_name = str(key_feeder_load.split(\"/totalLoad\")[0])\n Real_feeder_load = rload\n Imag_feeder_load = iload\n feeder_real_power.append(rload / 1000)\n feeder_imag_power.append(iload / 1000)\n else:\n try:\n data[sub_key].append(rload / 1000)\n except KeyError:\n data[sub_key] = [rload / 1000]\n\n key.append(sub_key)\n Real_demand.append(rload)\n Imag_demand.append(iload)\n\n logger.info(\"EV Controller grantedtime = {}\".format(grantedtime))\n\n logger.info(\"Total Feeder Load is {} + {} j\".format(Real_feeder_load, Imag_feeder_load))\n\n if Real_feeder_load > feeder_limit_upper:\n logger.info(\"Total Feeder Load is over the Feeder Upper Limit\")\n logger.info(\"Warning ----> Feeder OverLimit ---> Turn off EV\")\n\n if k < endpoint_count:\n end = endid[\"m{}\".format(k)]\n logger.info(\"endid: {}\".format(endid))\n end_name = str(h.helicsEndpointGetName(end))\n logger.info(\"Sending endpoint name: {}\".format(end_name))\n destination_name = end_name.replace(federate_name, distribution_fed_name)\n logger.info(\n \"Endpoint destination: {}\".format(h.helicsEndpointGetDefaultDestination(end))\n )\n status = 
h.helicsEndpointSendMessageRaw(end, \"\", str(\"0 + 0 j\")) #\n logger.info(\"Endpoint sending status: {}\".format(status))\n logger.info(\"Turning off {}\".format(end_name))\n k = k + 1\n else:\n logger.info(\"All EVs are Turned off\")\n\n if Real_feeder_load < feeder_limit_lower:\n logger.info(\"Total Feeder Load is under the Feeder Lower Limit\")\n logger.info(\"Feeder Can Support EVs ------> Turn on EV\")\n if k > 0:\n k = k - 1\n end = endid[\"m{}\".format(k)]\n end_name = h.helicsEndpointGetName(end)\n destination_name = end_name.replace(federate_name, distribution_fed_name)\n print(\"Endpoint Destination {}\".format(destination_name))\n status = h.helicsEndpointSendMessageRaw(end, \"\", str(\"200000 + 0 j\"))\n logger.info(\"Turning on {}\".format(end_name))\n else:\n logger.info(\"All EVs are Turned on\")\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n i = 1\n for keys in data:\n ax = fig.add_subplot(2, 3, i)\n ax.plot(time_sim, data[keys])\n ax.set_ylabel(\"EV Output in kW\")\n ax.set_xlabel(\"Time \")\n ax.set_title(keys)\n i = i + 1\n\n plt.show(block=True)\n data[\"time\"] = time_sim\n data[\"feeder_load(real)\"] = feeder_real_power\n pd.DataFrame.from_dict(data=data).to_csv(\"EV_Outputs.csv\", header=True)\n\n t = 60 * 60 * 24\n while grantedtime < t:\n grantedtime = h.helicsFederateRequestTime(fed, t)\n logger.info(\"Destroying federate\")\n destroy_federate(fed)\n","sub_path":"docs/user-guide/examples/user_guide_examples/Example_1b/EV_Controller/EV_Controller.py","file_name":"EV_Controller.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"196537492","text":"\"\"\"A logger. Because apparently the default one isn't good enough.\"\"\"\n\nfrom collections import defaultdict\nimport datetime\n\nDEFAULT_DATE_FORMAT = \"%Y-%m-%d %H:%M:%S%z\"\nDEFAULT_FORMAT = \"[{datetime}] {level}: {message}\"\nDEFAULT_LEVEL = \"INFO\"\nNEWLINE = '\\n'\n\nLEVELS = {\"NOTSET\": 00,\n \"DEBUG\": 10,\n \"INFO\": 20,\n \"NOTICE\": 30,\n \"WARNING\": 40,\n \"ERROR\": 50,\n \"CRITICAL\": 60}\n\nclass Writer(object):\n def __init__(self, output, tags=None, level=DEFAULT_LEVEL,\n format=DEFAULT_FORMAT, date_format=DEFAULT_DATE_FORMAT):\n self.output = output\n self.tags = tags if tags is not None else ['*']\n self.level = level\n self.int_level = LEVELS.get(level, 0)\n self.format = format\n self.date_format = date_format\n \n def write(self, line):\n self.output.write(line)\n self.output.write(NEWLINE)\n self.output.flush()\n \n def _do_write(self, message):\n line = self._pre_write(message)\n self.write(line)\n \n def _pre_write(self, message):\n args = message.args()\n args['datetime'] = args['datetime'].strftime(self.date_format)\n line = self.format.format(**args)\n return line\n\n## IRC ERRORS:\nclass NoHandlerError(NotImplementedError):\n pass\n\nclass IRCWriter(Writer):\n def __init__(self, output, tags=None, level=DEFAULT_LEVEL,\n format=DEFAULT_FORMAT, date_format=DEFAULT_DATE_FORMAT,\n irc_handler=None):\n Writer.__init__(self, output, tags, level, format, date_format)\n self.irc_handler = None\n \n def write(self, line):\n if self.irc_handler is None:\n raise NoHandlerError\n \n self.irc_handler.send_message(self.output, message)\n \n def add_irc_handler(self, handler):\n self.irc_handler = handler\n\nclass Message(object):\n def __init__(self, message, level=DEFAULT_LEVEL,\n tags=None, *args, **kwargs):\n self.tags = [] if tags is None else tags\n self.raw_message = message\n self.message = message.format(*args, **kwargs)\n self.level = level\n self.datetime = datetime.datetime.today()\n \n def 
args(self):\n new_dict = {}\n new_dict.update(self.__dict__)\n return new_dict\n\nclass Logger(object):\n \n instances = {}\n \n def __new__(cls, name=\"k-eight\", *args, **kwargs):\n if name in cls.instances:\n return cls.instances[name]\n else:\n new = object.__new__(cls, *args, **kwargs)\n new.name = name\n cls.instances[name] = new\n return new\n \n def __init__(self, name=\"k-eight\", writers=None):\n if not hasattr(self, 'writers'):\n self.writers = [] if writers is None else writers\n \n def log(self, message, tags=None, level=DEFAULT_LEVEL, *args, **kwargs):\n message = Message(message, level, tags, *args, **kwargs)\n if tags is None:\n for writer in self.writers:\n if '*' in writer.tags:\n if writer.int_level <= LEVELS.get(message.level, 0):\n writer._do_write(message)\n tags = []\n for tag in tags:\n for writer in self.writers:\n if tag in writer.tags or '*' in writer.tags:\n if writer.int_level <= LEVELS.get(message.level, 0):\n writer._do_write(message)\n \n def debug(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"DEBUG\", *args, **kwargs)\n \n def info(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"INFO\", *args, **kwargs)\n \n def notice(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"NOTICE\", *args, **kwargs)\n \n def warning(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"WARNING\", *args, **kwargs)\n \n def error(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"ERROR\", *args, **kwargs)\n \n def critical(self, message, tags=None, *args, **kwargs):\n self.log(message, tags, level=\"CRITICAL\", *args, **kwargs)\n \n def add_writers(self, *writers):\n self.writers.extend(writers)\n \n def add_writer(self, writer):\n 
self.writers.append(writer)","sub_path":"tools/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"115758959","text":"from flask import Flask, jsonify, make_response, abort, request\nfrom models import biblioteka\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"nininini\"\n\n@app.route(\"/api/v1/biblioteka/\", methods=[\"GET\"])\ndef biblioteka_list_api_v1():\n return jsonify(biblioteka.all())\n\n@app.route(\"/api/v1/biblioteka/\", methods=[\"GET\"])\ndef get_book(book_id):\n book = biblioteka.get(book_id)\n if not book:\n abort(404)\n return jsonify({\"book\": book})\n\n@app.route(\"/api/v1/biblioteka/\", methods=[\"POST\"])\ndef create_book():\n if not request.json or not 'title' in request.json:\n abort(400)\n\n book = {\n 'author': request.json.get('author', 'None'),\n 'id': biblioteka.all()[-1]['id'] + 1,\n 'title': request.json['title'],\n 'year': request.json.get('year', 0),\n 'read': False\n }\n biblioteka.create(book)\n return jsonify({'book': book}), 201\n\n@app.route(\"/api/v1/biblioteka/\", methods=['DELETE'])\ndef delete_book(book_id):\n result = biblioteka.delete(book_id)\n if not result:\n abort(404)\n return jsonify({'result': result})\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found', 'status_code': 404}), 404)\n\n@app.errorhandler(400)\ndef bad_request(error):\n return make_response(jsonify({'error': 'Bad request', 'status_code': 400}), 400)\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"501503725","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.forms import ModelForm\nfrom django import forms\nfrom .models import Profesor, Asignatura, AsignaturaDicatada, Estudiante\n\n\nclass ProfesorForm(ModelForm):\n class Meta:\n model = Profesor\n fields = ['profesor_nombre']\n labels = {'profesor_nombre': 'Nombre'}\n\n\nclass AsignaturaForm(ModelForm):\n class Meta:\n model = Asignatura\n fields = ['idAsignatura', 'asignatura_nombre']\n labels = {\n 'idAsignatura': 'Código de la Asignatura',\n 'asignatura_nombre': 'Nombre de la Asignatura'\n }\n\n\nclass AsignaturaDictadaForm(ModelForm):\n class Meta:\n model = AsignaturaDicatada\n fields = ['idAsignatura', 'idProfesor', 'asignatura_dictada_periodo']\n labels = {\n 'idAsignatura': 'Asignatura',\n 'idProfesor': 'Profesor',\n 'asignatura_dictada_periodo': 'Dictada'\n }\n\n\nclass EstudianteForm(ModelForm):\n \"\"\"\n ModelForm para un estudiante\n \"\"\"\n class Meta:\n model = Estudiante\n fields = ['idUniversidad']\n labels = {'idUniversidad': 'Universidad', }","sub_path":"apps/universidad/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"606174156","text":"'''\nCreated on Dec 21, 2016\n\n@author: Ahmed Sirage\n'''\nfrom nltk import SnowballStemmer\nfrom tensorflow.python.framework.tensor_shape import vector\nfrom nltk import word_tokenize\n\n\ndef get_line_stem(word):\n s = SnowballStemmer(\"english\")\n return s.stem(word)\n\n\ndef init_lexicon_dictionary():\n lexicon_dictionary = {} \n lexicon_stemmed_dictionary = {}\n ld_file = open('lexicon_dictionary.txt','r')\n for line in ld_file:\n word = line[:-22]\n vector = line[-21:-2].split()\n lexicon_dictionary[word] = vector\n lexicon_stemmed_dictionary[get_line_stem(word)] = vector\n ld_file.close()\n \n return lexicon_dictionary, lexicon_stemmed_dictionary\n\n\nclass sentence:\n '''\n classdocs\n '''\n \n \n\n\n\n\n lexicon_dictionary, lexicon_stemmed_dictionary = init_lexicon_dictionary()\n length = 16\n bayz=0\n kolo=0\n\n def __init__(self, text,emoj):\n '''\n Constructor\n '''\n self.text = text\n self.emoj = emoj\n self.words = {}\n self.tokens = word_tokenize(text, \"english\")\n\n\n for word in self.tokens:\n if word.lower() in sentence.lexicon_dictionary:\n if '1' in sentence.lexicon_dictionary[word.lower()]:\n self.words[word.lower()] = sentence.lexicon_dictionary[word.lower()]\n elif word.lower() in sentence.lexicon_stemmed_dictionary:\n if '1' in sentence.lexicon_stemmed_dictionary[word.lower()]:\n self.words[word.lower()] = sentence.lexicon_stemmed_dictionary[word.lower()]\n else:\n word_stem = get_line_stem(word) \n if word_stem.lower() in sentence.lexicon_dictionary:\n if '1' in sentence.lexicon_dictionary[word_stem]:\n self.words[word.lower()] = sentence.lexicon_dictionary[word_stem]\n elif word_stem.lower() in sentence.lexicon_stemmed_dictionary:\n if '1' in sentence.lexicon_stemmed_dictionary[word_stem]:\n self.words[word.lower()] = sentence.lexicon_stemmed_dictionary[word_stem]\n \n \n \n def get_words(self):\n return self.words \n \n def get_text(self):\n return self.text \n \n def get_len(self):\n return 
sentence.length \n \n \n def get_emoj(self):\n return self.emoj \n \n \n def init_sent_vector(self):\n self.vector=[]\n temp=[0,0,0,0,0,0,0,0,0,0]\n for word in self.words:\n for vec in self.words[word]:\n self.vector.append(int(vec))\n if len(self.vector) < (sentence.length*10):\n for i in range((sentence.length*10)-len(self.vector)):\n self.vector.append(0)\n for word in self.words:\n print(self.words[word])\n d=0\n for i in self.words[word]:\n if(temp[d] == 1 or i == '1'):\n temp[d] = 1\n d=d+1\n \n for i in temp:\n self.vector.append(i)\n \n \n def get_sentence_vector(self):\n return self.vector \n \n \n \n ","sub_path":"website/sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"197454133","text":"import os, sys\nfrom string import ascii_lowercase\n\ndirname, filename = os.path.split(os.path.abspath(sys.argv[0]))\n\ndef is_same_type(a, b):\n return a.lower() == b.lower()\n\ndef is_opposite_polarity(a, b):\n return (a.islower() and b.isupper()) or (a.isupper() and b.islower())\n\ndef do_react(a, b):\n return is_same_type(a, b) and is_opposite_polarity(a, b)\n\ndef remove_unit(unit, polymer):\n polymer = polymer.replace(unit.lower(), \"\")\n polymer = polymer.replace(unit.upper(), \"\")\n\n return polymer\n\npolymer = \"\"\n\n##open file and store \"polymer\"\nwith open(os.path.join(dirname, \"input.txt\")) as fileobj:\n for line in fileobj: \n for ch in line: \n polymer += ch\n\n##initialize min\noriginal_polymer = polymer\nmin = len(polymer)\n\n##loop through alphabet\nfor c in ascii_lowercase:\n\n i = 0\n polymer = remove_unit(c, polymer)\n\n ##loop through polymer\n while i < len(polymer): \n ##if this is the last unit, then we can stop the loop\n if i >= len(polymer) - 1:\n break\n\n ##otherwise, continue checking for reactive units\n current_unit = polymer[i]\n next_unit = polymer[i + 1]\n\n ##if the units react, remove them and start over at previous unit (if exists)\n if do_react(current_unit, next_unit):\n polymer = polymer[:i] + polymer[i + 2:]\n if i > 0:\n i -= 1\n else:\n i = 0\n ##otherwise just go to next unit\n else:\n i += 1\n \n ##if the length of this polymer is the new smallest, save it\n if len(polymer) < min:\n min = len(polymer)\n \n polymer = original_polymer\n\nprint(min)\n\n","sub_path":"5/5-2.py","file_name":"5-2.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"230091656","text":"import os\nimport config\n\nos.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id\nimport shutil\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\nfrom tensorboardX import SummaryWriter\n\nfrom model.model import East\nfrom model.loss import EastLoss\nfrom dataset.data_utils import custom_dset, collate_fn\nimport config\nfrom utils import *\nfrom eval import eval\n\n\ndef train_epoch(model, optimizer, scheduler, train_loader, device, criterion, epoch, all_step, writer, logger):\n model.train()\n train_loss = 0.\n start = time.time()\n lr = scheduler.get_lr()[0]\n\n for i, (img, score_map, geo_map, training_mask) in enumerate(train_loader):\n cur_batch = img.size()[0]\n img, score_map, geo_map, training_mask = img.to(device), score_map.to(device), geo_map.to(\n device), training_mask.to(device)\n\n f_score, f_geometry = model(img)\n loss = criterion(score_map, f_score, geo_map, f_geometry, training_mask)\n\n # backward\n scheduler.step()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss = loss.item()\n train_loss += loss\n cur_step = epoch * all_step + i\n writer.add_scalar(tag='Train/loss', scalar_value=loss, global_step=cur_step)\n writer.add_scalar(tag='Train/lr', scalar_value=lr, global_step=cur_step)\n\n if i % config.display_interval == 0:\n batch_time = time.time() - start\n logger.info(\n '[{}/{}], [{}/{}], step: {}, {:.3f} samples/sec, batch_loss: {:.4f} time:{:.4f}, lr:{}'.format(\n epoch, config.epochs, i, all_step, cur_step, config.display_interval * cur_batch / batch_time,\n loss, batch_time, lr))\n start = time.time()\n\n return train_loss / all_step, lr\n\n\n\ndef main():\n if config.output_dir is None:\n config.output_dir = 'output'\n if config.restart_training:\n shutil.rmtree(config.output_dir, ignore_errors=True)\n if not os.path.exists(config.output_dir):\n os.makedirs(config.output_dir)\n logger = setup_logger(os.path.join(config.output_dir, 
'train_log'))\n\n torch.manual_seed(config.seed) # 为CPU设置随机种子\n if config.gpu_id is not None and torch.cuda.is_available():\n torch.backends.cudnn.benchmark = True\n logger.info('train with gpu {} and pytorch {}'.format(config.gpu_id, torch.__version__))\n device = torch.device(\"cuda:0\")\n torch.cuda.manual_seed(config.seed) # 为当前GPU设置随机种子\n torch.cuda.manual_seed_all(config.seed) # 为所有GPU设置随机种子\n else:\n logger.info('train with cpu and pytorch {}'.format(torch.__version__))\n device = torch.device(\"cpu\")\n writer = SummaryWriter(config.output_dir)\n # Model\n model = East()\n if not config.pretrained and not config.restart_training:\n init_weights(model, init_type=config.init_type)\n num_gpus = torch.cuda.device_count()\n if num_gpus > 1:\n model = nn.DataParallel(model)\n model = model.to(device)\n\n train_data = custom_dset(config.trainroot)\n train_loader = DataLoader(train_data, batch_size=config.train_batch_size_per_gpu * num_gpus,\n shuffle=True, collate_fn=collate_fn, num_workers=config.workers)\n criterion = EastLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\n\n if config.checkpoint != '' and not config.restart_training:\n start_epoch = load_checkpoint(config.checkpoint, model, logger, device)\n start_epoch += 1\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, config.lr_decay_step, gamma=config.lr_gamma,\n last_epoch=start_epoch)\n else:\n start_epoch = config.start_epoch\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, config.lr_decay_step, gamma=config.lr_gamma)\n\n all_step = len(train_loader)\n logger.info('train dataset has {} samples,{} in dataloader'.format(train_data.__len__(), all_step))\n best_model = {'recall': 0, 'precision': 0, 'f1': 0, 'model': ''}\n\n try:\n for epoch in range(start_epoch, config.epochs):\n start = time.time()\n train_loss, lr = train_epoch(model, optimizer, scheduler, train_loader, device, criterion, epoch, all_step,\n writer, logger)\n logger.info('[{}/{}], train_loss: {:.4f}, 
time: {:.4f}, lr: {}'.format(\n epoch, config.epochs, train_loss, time.time() - start, lr))\n if epoch % 4 == 0 or train_loss < 0.005:\n recall, precision, f1 = eval(model, os.path.join(config.output_dir, 'output'), config.testroot, device)\n logger.info('test: recall: {:.6f}, precision: {:.6f}, f1: {:.6f}'.format(recall, precision, f1))\n\n net_save_path = '{}/PSENet_{}_loss{:.6f}_r{:.6f}_p{:.6f}_f1{:.6f}.pth'.format(config.output_dir, epoch,\n 0.1,\n recall,\n precision,\n f1)\n save_checkpoint(net_save_path, model, optimizer, epoch, logger)\n if f1 > best_model['f1']:\n best_model['recall'] = recall\n best_model['precision'] = precision\n best_model['f1'] = f1\n best_model['model'] = net_save_path\n writer.add_scalar(tag='Test/recall', scalar_value=recall, global_step=epoch)\n writer.add_scalar(tag='Test/precision', scalar_value=precision, global_step=epoch)\n writer.add_scalar(tag='Test/f1', scalar_value=f1, global_step=epoch)\n writer.close()\n except KeyboardInterrupt:\n save_checkpoint('{}/final.pth'.format(config.output_dir), model, optimizer, epoch, logger)\n finally:\n if best_model['model']:\n shutil.copy(best_model['model'],\n '{}/best_r{:.6f}_p{:.6f}_f1{:.6f}.pth'.format(config.output_dir, best_model['recall'],\n best_model['precision'], best_model['f1']))\n logger.info(best_model)\n\n # for epoch in range(start_epoch, config.max_epochs):\n #\n # train(train_loader, model, criterion, scheduler, optimizer, epoch)\n #\n # if epoch % config.eval_iteration == 0:\n #\n # # create res_file and img_with_box\n # output_txt_dir_path = predict(model, criterion, epoch)\n #\n # # Zip file\n # submit_path = MyZip(output_txt_dir_path, epoch)\n #\n # # submit and compute Hmean\n # hmean_ = compute_hmean(submit_path)\n #\n # if hmean_ > hmean:\n # is_best = True\n #\n # state = {\n # 'epoch' : epoch,\n # 'state_dict' : model.state_dict(),\n # 'optimizer' : optimizer.state_dict(),\n # 'is_best' : is_best,\n # }\n # save_checkpoint(state, epoch)\n\n\nif __name__ == 
\"__main__\":\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"156810033","text":"#!/usr/bin/python3\n# Connect to TTS MQTT Server and receive uplink messages using the Paho MQTT Python client library\n#\n# Original source:\n# https://github.com/descartes/TheThingsStack-Integration-Starters/blob/main/MQTT-to-Tab-Python3/TTS.MQTT.Tab.py\n#\n# Instructions to use Eclipse Paho MQTT Python client library:\n# https://www.thethingsindustries.com/docs/integrations/mqtt/mqtt-clients/eclipse-paho/)\n#\nimport os\nimport sys\nimport logging\nimport paho.mqtt.client as mqtt\nimport json\nimport csv\nimport random\nfrom datetime import datetime\n\n# Procedure to get the USER, PASSWORD, PUBLIC_TLS_ADDRESS and PUBLIC_TLS_ADDRESS_PORT:\n# 1. Login to The Things Stack Community Edition console\n# https://console.cloud.thethings.network/\n# 2. Select Go to applications\n# 3. Select your application\n# 4. On the left hand side menu, select Integrations | MQTT\n# 5. See Connection credentials\n# 6. For the password press button: Generate new API key\n# Each time you press this button a new password is generated!\n# The password looks like:\n# NNSXS.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n#\nUSER = \"pepsi@ttn\"\nPASSWORD = \"NNSXS.ZQDPGYRR4TRCXUGJUFKN4WEEJO2ZQDNCQC2JL6A.I4HU2GYDHU4VQ7K2MA7HEMFFNJXZQFLGJJTV7MC3RZKICJ2YE5XA\"\nPUBLIC_TLS_ADDRESS = \"eu1.cloud.thethings.network\"\nPUBLIC_TLS_ADDRESS_PORT = 8883\nDEVICE_ID = \"eui-0004a30b001e5e43\"\nALL_DEVICES = True\n\n# Meaning Quality of Service (QoS)\n# QoS = 0 - at most once\n# The client publishes the message, and there is no acknowledgement by the broker.\n# QoS = 1 - at least once\n# The broker sends an acknowledgement back to the client.\n# The client will re-send until it gets the broker's acknowledgement.\n# QoS = 2 - exactly once\n# Both sender and receiver are sure that the message was sent exactly once, using a kind of handshake\nQOS = 0\n\nDEBUG = True\n\n\ndef get_value_from_json_object(obj, key):\n try:\n return 
obj[key]\n except KeyError:\n return '-'\n\n\ndef stop(client):\n client.disconnect()\n print(\"\\nExit\")\n sys.exit(0)\n\n\n# Write uplink to tab file\ndef save_to_file(some_json):\n end_device_ids = some_json[\"end_device_ids\"]\n device_id = end_device_ids[\"device_id\"]\n application_id = end_device_ids[\"application_ids\"][\"application_id\"]\n received_at = some_json[\"received_at\"]\n\n if 'uplink_message' in some_json:\n uplink_message = some_json[\"uplink_message\"]\n f_port = get_value_from_json_object(uplink_message, \"f_port\")\n\n # check if f_port is found\n if f_port != '-':\n f_cnt = get_value_from_json_object(uplink_message, \"f_cnt\")\n frm_payload = uplink_message[\"frm_payload\"]\n # If decoded_payload is a json object or a string \"-\" it will be converted to string\n decoded_payload = str(get_value_from_json_object(uplink_message, \"decoded_payload\"))\n rssi = get_value_from_json_object(uplink_message[\"rx_metadata\"][0], \"rssi\")\n snr = get_value_from_json_object(uplink_message[\"rx_metadata\"][0], \"snr\")\n data_rate_index = get_value_from_json_object(uplink_message[\"settings\"], \"data_rate_index\")\n consumed_airtime = get_value_from_json_object(uplink_message, \"consumed_airtime\")\n\n # Daily log of uplinks\n now = datetime.now()\n path_n_file = now.strftime(\"%Y%m%d\") + \".txt\"\n print(path_n_file)\n if not os.path.isfile(path_n_file):\n with open(path_n_file, 'a', newline='') as tabFile:\n fw = csv.writer(tabFile, dialect='excel-tab')\n fw.writerow([\"received_at\", \"application_id\", \"device_id\", \"f_port\", \"f_cnt\", \"rssi\", \"snr\",\n \"data_rate_index\", \"consumed_airtime\", \"frm_payload\", \"decoded_payload\"])\n\n with open(path_n_file, 'a', newline='') as tabFile:\n fw = csv.writer(tabFile, dialect='excel-tab')\n fw.writerow([received_at, application_id, device_id, f_port, f_cnt, rssi, snr,\n data_rate_index, consumed_airtime, frm_payload, decoded_payload])\n\n\n# The callback for when the client receives a 
CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"\\nConnected successfully to MQTT broker\")\n else:\n print(\"\\nFailed to connect, return code = \" + str(rc))\n\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, message):\n print(\"\\nMessage received on topic '\" + message.topic + \"' with QoS = \" + str(message.qos))\n\n parsed_json = json.loads(message.payload)\n\n if DEBUG:\n print(\"Payload (Collapsed): \" + str(message.payload))\n print(\"Payload (Expanded): \\n\" + json.dumps(parsed_json, indent=4))\n\n save_to_file(parsed_json)\n\n\n# mid = message ID\n# It is an integer that is a unique message identifier assigned by the client.\n# If you use QoS levels 1 or 2 then the client loop will use the mid to identify messages that have not been sent.\ndef on_subscribe(client, userdata, mid, granted_qos):\n print(\"\\nSubscribed with message id (mid) = \" + str(mid) + \" and QoS = \" + str(granted_qos))\n\n\ndef on_disconnect(client, userdata, rc):\n print(\"\\nDisconnected with result code = \" + str(rc))\n\n\ndef on_log(client, userdata, level, buf):\n print(\"\\nLog: \" + buf)\n logging_level = client.LOGGING_LEVEL[level]\n logging.log(logging_level, buf)\n\n\n# Generate client ID with pub prefix randomly\nclient_id = f'python-mqtt-{random.randint(0, 1000)}'\n\nprint(\"Create new mqtt client instance\")\nmqttc = mqtt.Client(client_id)\n\nprint(\"Assign callback functions\")\nmqttc.on_connect = on_connect\nmqttc.on_subscribe = on_subscribe\nmqttc.on_message = on_message\nmqttc.on_disconnect = on_disconnect\n# mqttc.on_log = on_log # Logging for debugging OK, waste\n\n# Setup authentication from settings above\nmqttc.username_pw_set(USER, PASSWORD)\n\n# IMPORTANT - this enables the encryption of messages\nmqttc.tls_set() # default certification authority of the system\n\n# mqttc.tls_set(ca_certs=\"mqtt-ca.pem\") # Use this if you get security errors\n# 
It loads the TTI security certificate. Download it from their website from this page: \n# https://www.thethingsnetwork.org/docs/applications/mqtt/api/index.html\n# This is normally required if you are running the script on Windows\n\nprint(\"Connecting to broker: \" + PUBLIC_TLS_ADDRESS + \":\" + str(PUBLIC_TLS_ADDRESS_PORT))\nmqttc.connect(PUBLIC_TLS_ADDRESS, PUBLIC_TLS_ADDRESS_PORT, 60)\n\n\nif ALL_DEVICES:\n print(\"Subscribe to all topics (#) with QoS = \" + str(QOS))\n mqttc.subscribe(\"#\", QOS)\nelif len(DEVICE_ID) != 0:\n topic = \"v3/\" + USER + \"/devices/\" + DEVICE_ID + \"/up\"\n print(\"Subscribe to topic \" + topic + \" with QoS = \" + str(QOS))\n mqttc.subscribe(topic, QOS)\nelse:\n print(\"Can not subscribe to any topic\")\n stop(mqttc)\n\n\nprint(\"And run forever\")\ntry:\n run = True\n while run:\n mqttc.loop(10) # seconds timeout / blocking time\n # print(\".\", end=\"\", flush=True) # feedback to the user that something is actually happening\nexcept KeyboardInterrupt:\n stop(mqttc)\n","sub_path":"uplink_download.py","file_name":"uplink_download.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"24706495","text":"# -*- coding:utf-8 -*-\r\nclass Solution:\r\n\r\n def VerifySquenceOfBST(self, sequence):\r\n # write code here\r\n if not sequence:\r\n return True\r\n root = sequence[-1]\r\n # 找到最大值结点\r\n for i in range(len(sequence)):\r\n if sequence[i] > root:\r\n break\r\n # 检查最大值结点右侧的数值是否违规\r\n for i in range(i, len(sequence) - 1):\r\n if sequence[i] < root:\r\n return False\r\n # 递归\r\n return self.VerifySquenceOfBST(sequence[:i]) \\\r\n and self.VerifySquenceOfBST(sequence[i:-1])\r\n\r\nif __name__ == '__main__':\r\n # arr = [5, 7, 6, 9, 11, 10, 8]\r\n arr = []\r\n solo = Solution()\r\n result = solo.VerifySquenceOfBST(arr)\r\n print(result)","sub_path":"towords_offer/tree/33 二叉搜索树的后续遍历序列.py","file_name":"33 二叉搜索树的后续遍历序列.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"314903122","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 11 20:01:57 2018\n\n@author: dell\n\"\"\"\nimport pandas as pd\nimport os\nimport re\nimport urllib\nimport pymysql\nimport numpy as np\n\n\nf = open('C:/Users/htc/Desktop/总和.txt')\na = f.read()\nb = a.strip()\ncids = re.findall('data-cid=\"(\\d+)\"', b)\nnames = re.findall('data-name=\"([^\"].*)\"', b)\ndp = pd.DataFrame({'cid':cids,'pro':names})\n#pieces = dict(list(dp.groupby('cid')))\nCheck = list(set(cids))\nindustry = ['彩妆/香水/美妆工具','美容护肤/美体/精油','运动服/休闲服装','保健食品/膳食营养补充食品','奶粉/辅食/营养品/零食','运动鞋new']\n\n#新增商品\nbrand_ = pd.read_excel('C:/Users/htc/Desktop/result.xlsx').values.T.tolist()\nurl = brand_[1]\ncvid = brand_[0]\null = list(zip(url,cvid))\nbrand_append_1 = []\nbrand_append_2 = []\nfor i in ull:\n itembrand = urllib.parse.unquote(re.findall('name=(\\S+)', i[0])[0])\n a = re.findall('data-cid=\"(\\d+)\"', i[1])\n b = re.findall('\">(\\D+)', i[1])#因为没数字\n caid = list(zip(a,b))\n for o in caid:\n if re.findall('(\\S+)', o[1])[0] in industry:\n if len(re.findall('(\\S+)', o[1])) == 3:\n brand_append_1.append((itembrand,o[0]))\n brand_append_2.append(o[0])\nCheck = list(set(Check+brand_append_2))\ndp1 = pd.DataFrame(brand_append_1,columns=['pro','cid'])\ndp1 = dp1[['cid','pro']]\ndp = pd.concat([dp,dp1],axis =0).reset_index()\npieces = dict(list(dp.groupby('cid')))\n\nbrand_out = pd.read_excel('C:/Users/htc/Desktop/country.xlsx')[0].tolist()\n'''\n#连接数据库\nconn = pymysql.connect(host=\"rm-bp12z8rh0j5503p6p2o.mysql.rds.aliyuncs.com\", \n user=\"wangquan\",\n passwd=\"Wq5985790\",\n db='tbdata',\n port=3306,\n charset='utf8')\ncur = conn.cursor()\nconn.commit()\ncur.execute('create table group_byss(itemName varchar(100), itemUrl varchar(80), itemBrand varchar(50), storeName varchar(50), storeCredit varchar(20), itemPromotion varchar(30), salesFaverite decimal, salesReview decimal, listPrice decimal(20,2) ,salesPrice decimal(20,2) , salesQty decimal, salesAmount decimal(20,2), catIVID 
decimal,catI varchar(50),catII varchar(50),catIII varchar(50),catIV varchar(50),dataPeriod varchar(20),主键 INT UNSIGNED NOT NULL PRIMARY KEY AUTO_INCREMENT, group1 decimal, group2 varchar(80))' )\ncur.execute('create table groups(itemName varchar(100), itemUrl varchar(80), itemBrand varchar(50), catIV varchar(50), primaryKey decimal, group1 decimal)' )#group是有特殊意义的就像'一样识别不了\n#创造\ncur.execute(\"truncate table group_byss\")\ncur.execute(\"drop table group_bys\")\n#清楚'''\n\n#ci = '50010815'\nh = 0\nj = 0\nfor ci in Check:\n h += 1\n #if h <= 82:\n # continue\n brand = pieces[ci]['pro'].tolist()\n brand = list(set(brand))\n for bra in brand:\n if bra not in brand_out:\n continue\n j += 1\n #if j < 4892:\n # continue\n bra = bra.replace('amp;','')\n #统一品牌名称\n bra_down=bra.replace(\"'\",'’')\n bra = urllib.parse.quote(bra,safe='@')\n if bra[-1] == '.':\n bra = bra.replace('.','。')\n os.chdir('D:/线下行业数据库/%s/%s/' % (ci,bra))\n filelist = os.listdir()\n list_1 = []\n for i in filelist:\n aa = pd.read_excel(i)\n aa['itemBrand'] = bra_down\n aa['group1'] = aa['itemUrl'].str.split('=').str[1]\n aa['group2'] = ''\n list_1.append(aa)\n #数据清洗\n if str(aa['catI'].tolist()[0]) not in industry:\n list_1 = []\n continue\n if list_1 == []:#防空文件夹,同时去掉错误文件\n continue\n \n bigdata_ = pd.concat(list_1)\n bigdata_ = bigdata_.reset_index(drop=True)\n bigdata_.loc[bigdata_[bigdata_['dataPeriod']=='2019年04月'].index.tolist(),'group2'] = bigdata_.loc[bigdata_[bigdata_['dataPeriod']=='2019年04月'].index]['group1']\n \n #去重,难免有些失误\n bigdata_.duplicated()\n df = bigdata_.drop_duplicates()\n df = df.reset_index(drop=True)\n df = df.loc[df['itemName'].dropna(axis=0).index]\n \n df = df.sort_values('group1')\n \n #上传数据\n from sqlalchemy import create_engine\n engine = create_engine(\"mysql+pymysql://wangquan:Wq5985790@rm-bp12z8rh0j5503p6p2o.mysql.rds.aliyuncs.com:3306/tbdata?charset=utf8\")\n df.to_sql(name = 'group_byss',con = engine,if_exists = 'append',index = False,index_label = False)\n 
\n#df.to_excel(\"C:/Users/htc/Desktop/shiyan1.xlsx\")\n#conn.close() \n #ss\n #sz","sub_path":"text_in.py","file_name":"text_in.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"241223015","text":"import os.path\nimport pickle\nimport requests\nfrom pyquery import PyQuery as pq\nfrom collections import OrderedDict\nfrom unicodecsv import DictWriter\n\nimport util\n\nbase_url = 'http://localhost:8080/towns/govikon'\nusername = 'admin@example.org'\npassword = 'etamogimarusakot'\n\nif 'localhost' not in base_url:\n print('Are you sure you want to do this (yes)?')\n if raw_input() != 'yes':\n exit()\n\ntagmap = {\n 'Kirche': 'Religion',\n 'Politik': 'Politik',\n 'Sport': 'Sport',\n}\n\n# Fetch event categories\nif os.path.exists('event_urls.p'):\n with open(\"event_urls.p\", \"rb\") as dumpfile:\n event_urls = pickle.load(dumpfile)\n\nelse:\n event_urls = {\n 'all': {\n 'url': (\n 'http://www.rueti.ch/dorfleben/veranstaltungen/'\n 'veranstaltungskategorien'\n )\n }\n }\n page = pq(requests.get(event_urls['all']['url']).text)\n for a in page('li.section-veranstaltungskategorien li a'):\n event_urls[pq(a).text()] = {'url': pq(a).attr('href')}\n\n # Fetch event urls\n for key in event_urls:\n event_urls[key]['events'] = []\n next_url = event_urls[key]['url']\n while next_url:\n page = pq(requests.get(next_url).text)\n event_urls[key]['events'].extend(\n [pq(a).attr('href') for a in page('a.summary.url')]\n )\n next_url = page('span.next > a').attr('href')\n\n with open(\"event_urls.p\", \"wb\") as dumpfile:\n pickle.dump(event_urls, dumpfile)\n\n\n# Fetch events\nif os.path.exists('events.p'):\n with open(\"events.p\", \"rb\") as dumpfile:\n events = pickle.load(dumpfile)\n\nelse:\n events = []\n print('Fetching {} events'.format(len(event_urls['all']['events'])))\n for event in event_urls['all']['events']:\n page = pq(requests.get(event).text)\n events.append(OrderedDict((\n ('title', page('h1').text()),\n ('start', page('#parent-fieldname-startDate').attr('title')),\n ('end', page('#parent-fieldname-endDate').attr('title')),\n ('timezone', 'Europe/Zurich'),\n # 'recurrence'\n ('tags', ','.join([\n tagmap.get(key, '') for key in 
event_urls\n if event in event_urls[key]['events']\n and key != 'all' and tagmap.get(key, '')\n ])),\n ('location', page('#parent-fieldname-location').text()),\n ('content_description', (u'{}\\n\\n{}\\n\\n{}\\n{}\\n{}\\n{}'.format(\n page('#parent-fieldname-description').text(),\n page('#parent-fieldname-text').text(),\n page('a.email').text(),\n page('#parent-fieldname-contactPhone').text(),\n page('a.email').attr('href').replace('mailto:', ''),\n page('#parent-fieldname-eventUrl').attr('href') or ''\n ))),\n ('meta_submitter_email', 'info@rueti.ch'),\n )))\n\n with open(\"events.p\", \"wb\") as dumpfile:\n pickle.dump(events, dumpfile)\n\n# Write output\nwith open('output.csv', 'w') as csvfile:\n writer = DictWriter(csvfile, fieldnames=events[0].keys())\n writer.writeheader()\n for event in events:\n writer.writerow(event)\n\n# Submit events\nfor event in events:\n util.submit_event(\n email=event['meta_submitter_email'],\n title=event['title'],\n description=event['content_description'],\n location=event['location'],\n start_date=event['start'].split('T')[0],\n start_time=event['start'].split('T')[1][:5],\n end_time=event['end'].split('T')[1][:5],\n base_url=base_url\n )\n\n# Publish events & close tickets\ncookies = util.login(username, password, base_url=base_url)\ntickets = util.get_tickets(cookies, category='EVN', state='open',\n base_url=base_url)\nfor ticket in tickets:\n util.accept_ticket(cookies, ticket)\n util.accept_event(cookies, ticket)\n util.close_ticket(cookies, ticket)\n","sub_path":"process_events.py","file_name":"process_events.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"429638407","text":"import numpy as np\n\ndef readfile(filename='articlewordmatrix.txt'):\n fileInfo=open('articlesmetadata.txt')\n fileMatrix=open(filename)\n\n lines = [line for line in fileMatrix]\n # First line is the column titles\n colnames = lines[0].strip().split('\\t')[1:]\n rownames = []\n data = []\n for line in lines[1:]:\n p = line.strip().split('\\t')\n # First column in each row is the rowname\n rownames.append(p[0])\n # The data for this row is the remainder of the row\n data.append([float(x) for x in p[1:]])\n sources = []\n authors = [] \n url = []\n #published = []\n #for line in fileInfo:\n # p = line.strip().split('\\t')\n # if p == ['error']:\n # p = ['None']*4\n # sources.append(p[1])\n # authors.append(p[2])\n # url.append(p[3])\n #published.append(p[4])\n fileMatrix.close()\n fileInfo.close()\n return rownames, colnames, data\n\ndef writefile(rownames, colnames, data):\n fileMatrix=open('articlewordmatrix_processed.txt','w')\n\n fileMatrix.write('Article')\n for word in colnames: fileMatrix.write('\\t%s' % word)\n fileMatrix.write('\\n')\n\n for i in range(len(rownames)):\n fileMatrix.write(rownames[i])\n row = '\\t' + '\\t'.join(map(str, data[i])) + '\\n'\n fileMatrix.write(row)\n \n fileMatrix.close()\n\n\n#Convert the data matrix to a tf matrix\ndef tf(data):\n return [termfreq(v) for v in data]\n\n#Convert the data matrix to a tf-idf matrix\ndef tfidf(data):\n data_tf = tf(data)\n idf_vec = invdocfreq(data)\n return [[c * idf for c, idf in zip(v,idf_vec)] for v in data_tf]\n\n#term frequency\ndef termfreq(v):\n return [c != 0 and 1 + np.log(c) or 0 for c in v]\n\n#inverse document frequency\t\ndef docap(data):\n N = len(data)\n return [o/N for o in [sum([c and 1 for c in x]) for x in zip(*data)]]\n\ndef invdocfreq(data):\n return [np.log(1/x) for x in docap(data)]\n\ndef pruning(data,colnames,lowerthreshold,higherthreshold):\n matrix, words = data[:], colnames[:]\n apvec = docap(matrix)\n wordstodel = []\n for i in 
range(len(apvec)):\n if not lowerthreshold < apvec[i] < higherthreshold:\n wordstodel = [i] + wordstodel # or append than reverse?\n for wordid in wordstodel:\n for v in matrix:\n del v[wordid]\n del words[wordid]\n return matrix,words\n\ndef truncation(data):\n pass\n\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"271612564","text":"import glob\nimport os\nfrom PIL import Image\nfrom tqdm import tqdm\nimport random\nimport subprocess\nimport sys\n\n\ndef main():\n process_dir = \"./data/img/\"\n \n try:\n os.mkdir(\"./data\")\n except OSError:\n print(\"Creation of the directory failed\")\n else:\n print(\"Successfully created the directory\")\n\n try:\n os.mkdir(process_dir)\n except OSError:\n print(\"Creation of the directory failed\")\n else:\n print(\"Successfully created the directory\")\n\n\n dir = input(\"Enter image directory: \")\n image_path = \"{}/*.jpg\".format(dir)\n\n for file_path in tqdm(glob.glob(image_path)):\n\n file, ext = os.path.splitext(file_path)\n file_name = file_path.split(\"\\\\\")[-1].split(\".\")[0]\n\n # file_name = file_path.split(\"\\\\\")[-1]\n im = Image.open(file_path)\n width, height = im.size\n\n im.thumbnail((width / 2, height / 2))\n im.save(process_dir + file_name + \".resize.jpeg\", \"JPEG\")\n\n rotate_image90 = im.rotate(angle=90)\n rotate_image90.save(process_dir +\n file_name + \".rotate90.jpeg\", \"jpeg\")\n\n rotate_image180 = im.rotate(angle=180)\n rotate_image180.save(process_dir +\n file_name + \".rotate180.jpeg\", \"jpeg\")\n\n rotate_image270 = im.rotate(angle=270)\n rotate_image270.save(process_dir +\n file_name + \".rotate270.jpeg\", \"jpeg\")\n\n split_data_set(process_dir)\n\n\ndef split_data_set(image_dir):\n\n f_val = open(\"data/test.txt\", 'w')\n f_train = open(\"data/train.txt\", 'w')\n\n path, dirs, files = next(os.walk(image_dir))\n data_size = len(files)\n\n ind = 0\n data_test_size = int(0.1 * data_size)\n test_array = random.sample(range(data_size), k=data_test_size)\n count = 0\n for f in os.listdir(image_dir):\n count += 1\n print(count)\n file_extension = f.split(\".\")[-1]\n local_file_reference = image_dir.split(\"/\")\n\n if(file_extension == \"jpg\" or file_extension == \"jpeg\"):\n ind += 1\n\n if ind in test_array:\n f_val.write(local_file_reference[1]+ '/' + local_file_reference[2] + 
'/' +f+'\\n')\n else:\n f_train.write(local_file_reference[1]+ '/' + local_file_reference[2] + '/' +f+'\\n')\n\nif __name__ == \"__main__\":\n main()","sub_path":"prepareDataset.py","file_name":"prepareDataset.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"163482574","text":"import sys\nimport logging\n\nimport grpc\nimport concurrent.futures as futures\n\nimport service.common\nimport service.image_recon as img_recon\nfrom service import flowers_map_names, dogs_map_names, cars_map_names\n\n# Importing the generated codes from buildproto.sh\nimport service.service_spec.image_recon_pb2_grpc as grpc_bt_grpc\nfrom service.service_spec.image_recon_pb2 import Result\n\nlogging.basicConfig(level=10, format=\"%(asctime)s - [%(levelname)8s] - %(name)s - %(message)s\")\nlog = logging.getLogger(\"image_recon_service\")\n\n\n# Create a class to be added to the gRPC server\n# derived from the protobuf codes.\nclass FlowersServicer(grpc_bt_grpc.FlowersServicer):\n def __init__(self):\n self.model = \"ResNet152\"\n self.img_path = \"\"\n self.result = \"Fail\"\n\n # Just for debugging purpose.\n log.debug(\"FlowersServicer created\")\n\n # The method that will be exposed to the snet-cli call command.\n # request: incoming data\n # context: object that provides RPC-specific information (timeout, etc).\n def flowers(self, request, context):\n # In our case, request is a Numbers() object (from .proto file)\n self.img_path = request.img_path\n self.model = request.model\n\n map_names = flowers_map_names\n image_dims = (3, 224, 224)\n json_result = img_recon.image_recognition(\"flowers\", self.model, map_names, self.img_path, image_dims)\n\n # To respond we need to create a Result() object (from .proto file)\n self.result = Result()\n self.result.top_5 = str(json_result[\"top_5\"]).encode(\"utf-8\")\n self.result.delta_time = str(json_result[\"delta_time\"]).encode(\"utf-8\")\n log.debug(\"flowers({},{})={}\".format(self.model, self.img_path, self.result.top_5))\n return self.result\n\n\nclass DogsServicer(grpc_bt_grpc.DogsServicer):\n def __init__(self):\n self.model = \"ResNet152\"\n self.img_path = \"\"\n self.result = \"Fail\"\n log.debug(\"DogsServicer created\")\n\n def dogs(self, request, context):\n\n self.img_path 
= request.img_path\n self.model = request.model\n\n map_names = dogs_map_names\n image_dims = (3, 224, 224)\n json_result = img_recon.image_recognition(\"dogs\", self.model, map_names, self.img_path, image_dims)\n\n self.result = Result()\n self.result.top_5 = str(json_result[\"top_5\"]).encode(\"utf-8\")\n self.result.delta_time = str(json_result[\"delta_time\"]).encode(\"utf-8\")\n log.debug(\"dogs({},{})={}\".format(self.model, self.img_path, self.result.top_5))\n return self.result\n\n\nclass CarsServicer(grpc_bt_grpc.CarsServicer):\n def __init__(self):\n self.model = \"ResNet152\"\n self.img_path = \"\"\n self.result = \"Fail\"\n log.debug(\"CarsServicer created\")\n\n def cars(self, request, context):\n\n self.img_path = request.img_path\n self.model = request.model\n\n map_names = cars_map_names\n image_dims = (3, 224, 224)\n json_result = img_recon.image_recognition(\"cars\", self.model, map_names, self.img_path, image_dims)\n\n self.result = Result()\n self.result.top_5 = str(json_result[\"top_5\"]).encode(\"utf-8\")\n self.result.delta_time = str(json_result[\"delta_time\"]).encode(\"utf-8\")\n log.debug(\"cars({},{})={}\".format(self.model, self.img_path, self.result.top_5))\n return self.result\n\n\n# The gRPC serve function.\n#\n# Params:\n# max_workers: pool of threads to execute calls asynchronously\n# port: gRPC server port\n#\n# Add all your classes to the server here.\ndef serve(max_workers=10, port=7777):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))\n grpc_bt_grpc.add_FlowersServicer_to_server(FlowersServicer(), server)\n grpc_bt_grpc.add_DogsServicer_to_server(DogsServicer(), server)\n grpc_bt_grpc.add_CarsServicer_to_server(CarsServicer(), server)\n server.add_insecure_port(\"[::]:{}\".format(port))\n return server\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Runs the gRPC server to communicate with the Snet Daemon.\n \"\"\"\n parser = service.common.common_parser(__file__)\n args = 
parser.parse_args(sys.argv[1:])\n service.common.main_loop(serve, args)\n","sub_path":"Services/gRPC/cntk-image-recon/service/image_recon_service.py","file_name":"image_recon_service.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"31991556","text":"\n\nfrom xai.brain.wordbase.nouns._ambulance import _AMBULANCE\n\n#calss header\nclass _AMBULANCES(_AMBULANCE, ):\n\tdef __init__(self,): \n\t\t_AMBULANCE.__init__(self)\n\t\tself.name = \"AMBULANCES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"ambulance\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_ambulances.py","file_name":"_ambulances.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"91204885","text":"#bingo program LC\n#made by Casper Dik\n\ndef create_bingo_card():\n import random\n import numpy\n\n read = open(\"terms.txt\").read()\n read = read[0:]\n list = read.split()\n list_3 = list.copy()\n card = []\n\n for i in range(25):\n random_word = random.choice(list_3)\n card.append(random_word) # create list with 25 terms\n list_3.remove(random_word)\n\n bingo_card = numpy.array(card)\n bingo_card = bingo_card.reshape([5, 5]) # reshape list in 5x5 grid\n print(bingo_card)\n numpy.savetxt(\"bingo_card.txt\", numpy.array(bingo_card), fmt=\"%s\")\n\ndef generate_bingo_cards():\n new_card = \"no\"\n create_bingo_card()\n new_card = input(\"Do you want to generate a new bingo card: \")\n while (new_card == \"yes\"):\n create_bingo_card()\n new_card = input(\"Do you want to generate a new bingo card: \")\n\ngenerate_bingo_cards()","sub_path":"assessment_basic_track/create_card.py","file_name":"create_card.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"79788134","text":"#!/usr/bin/env python\n\"\"\"\nAuthor: Tianyi Gu\nDate: Mar / 7 / 2017\nDesc: Controller node \n\"\"\"\nimport sys\nsys.path.insert(0, \"../../../doc/motionPrimitive/\")\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\nfrom geometry_msgs.msg import Vector3\nfrom threading import Thread\nfrom primutils import Primitive, read_primitives, read_primitives_with_duration\nfrom collections import defaultdict\nimport itertools\nimport tf\nfrom nav_msgs.msg import Odometry\n\nactionQueue = []\nmotions = defaultdict(list)\nduration = 0.0\ncurrentTwist = Twist()\n\n\ndef executive_callback(data):\n global actionQueue\n rospy.loginfo(rospy.get_caller_id() + 'Executive give me the motion: %s',\n data.data)\n actionQueue.append(data.data)\n\ndef executive_listener():\n rospy.Subscriber('controller_msg', String, executive_callback)\n rospy.spin()\n\ndef pose_callback(data):\n global currentTwist\n rospy.loginfo(rospy.get_caller_id() + 'Get latest pose info: \\n' + \n \"linear x: %.2f\" % data.twist.linear.x + \"\\n\" + \n \"linear y: %.2f\" % data.twist.linear.y+\"\\n\"+\n \"linear z: %.2f\" % data.twist.linear.z+\"\\n\"+\n \"angular x: %.2f\" % data.twist.angular.x+\"\\n\"+\n \"angular y: %.2f\" % data.twist.angular.y+\"\\n\"+\n \"angular z: %.2f\" % data.twist.angular.z+\"\\n\")\n currentTwist = data.twist\n\ndef pose_listener():\n rospy.Subscriber('pose', Odometry, pose_callback)\n rospy.spin()\n\ndef move():\n global actionQueue\n #here,we publish actions to the topic 'cmd_vel'\n pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)\n speedListener = tf.TransformListener()\n #rospy.init_node('controller_publisher', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n while not rospy.is_shutdown():\n while actionQueue:\n motionStr = actionQueue.pop(0)\n currentMotion = motions[motionStr]\n motion = currentTwist\n motion.linear.x += currentMotion[0] * duration\n motion.angular.z += currentMotion[1] * duration\n 
rospy.loginfo(\"controller publish action: \\n\" + \n \"linear x: %.2f\" % (motion.linear.x) + \"\\n\" + \n \"linear y: %.2f\" % motion.linear.y+\"\\n\"+\n \"linear z: %.2f\" % motion.linear.z+\"\\n\"+\n \"angular x: %.2f\" % motion.angular.x+\"\\n\"+\n \"angular y: %.2f\" % motion.angular.y+\"\\n\"+\n \"angular z: %.2f\" % motion.angular.z+\"\\n\" +\n \"duration: \" + str(duration))\n pub.publish(motion)\n rospy.sleep(rospy.Duration(duration))\n motion = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))\n rospy.logerr(\"Action Queue is empty!\")\n pub.publish(motion)\n rate.sleep()\n\ndef init_motions():\n global duration\n (primitives, duration) = read_primitives_with_duration(\"../../../doc/motionPrimitive/primitives.txt\")\n dupMotions = [[p.name, p.va, p.wa] for p in primitives]\n dupMotions.sort()\n filterMotions = [m for m, _ in itertools.groupby(dupMotions)]\n for i in filterMotions:\n motions[i[0]].append(i[1])\n motions[i[0]].append(i[2])\n # print i\n #print motions[\"a6\"] \n\nif __name__ == '__main__':\n init_motions();\n rospy.init_node('controller_node', anonymous=True)\n execListernerThread = Thread(target=executive_listener, args=())\n poseListernerThread = Thread(target=pose_listener, args=())\n #execListernerThread.setDaemon(True)\n execListernerThread.start()\n controllerPublisherThread = Thread(target=move, args=())\n #controllerPublisherThread.setDaemon(True)\n controllerPublisherThread.start()\n \n # try:\n # move()\n # except rospy.ROSInterruptException:\n # pass\n","sub_path":"src/pioneer_hallway/controller/obsolete/v1_bak_controller_node.py","file_name":"v1_bak_controller_node.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"620903593","text":"from flask_restful import Resource, reqparse\nfrom flask import request\nfrom models.promocode import PromoCodeModel\nfrom flask_restful_swagger import swagger\nfrom datetime import datetime\n\nclass PromoCode(Resource):\n\n\tparser = reqparse.RequestParser()\n\tparser.add_argument('promo_code',\n\t\t\ttype = str,\n\t\t\trequired = True,\n\t\t\thelp = \"Promo Code is Required\"\n\t\t\t)\n\tparser.add_argument('promo_discount_per',\n\t\t\ttype = int,\n\t\t\trequired = True,\n\t\t\thelp = \"Promo Code Discount Percentage is Required\"\n\t\t\t)\n\tparser.add_argument('promo_validity',\n\t\t\ttype = str,\n\t\t\trequired = True,\n\t\t\thelp = \"Promo Validity is Required. Format: YYYY-MM-DD\"\n\t\t\t)\n\tparser.add_argument('promo_wallet',\n\t\ttype = bool,\n\t\trequired = True,\n\t\thelp = \"Cashback on wallet or Discount on Order ?? \")\n\tparser.add_argument('promo_user', \n\t\ttype=bool,\n\t\trequired = True,\n\t\thelp = \"Promo Code Valid For all Users Or Not\")\n\tparser.add_argument('promo_description',\n\t\ttype = str,\n\t\trequired = True,\n\t\thelp = \"Promo Code Description\")\n\tparser.add_argument('promo_url',\n\t\ttype = str,\n\t\trequired = False)\n\n\t@swagger.operation(\n\t\tnotes='Adding A Promo Code',\n\t\tnickname='POST',\n\t\tparameters=[\n\t\t\t{\n\t\t\t\t\"name\": \"promo_code\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"string\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_discount_per\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\":\"int\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_validity\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"Date\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\":\"promo_wallet\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"Boolean\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_user\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"Boolean\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_description\",\n\t\t\t\t\"required\": 
True,\n\t\t\t\t\"dataType\": \"String\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_url\",\n\t\t\t\t\"required\": False,\n\t\t\t\t\"dataType\": \"String\"\n\t\t\t}]\n\t\t)\n\tdef post(self):\n\t\tdata = PromoCode.parser.parse_args()\n\t\tdatetime_object = datetime.strptime(data['promo_validity'],'%Y-%m-%d')\n\t\tif data['promo_url'] is None:\n\t\t\tpromocode = PromoCodeModel(data['promo_code'], data['promo_discount_per'], datetime_object, data[\"promo_wallet\"], data[\"promo_user\"], data[\"promo_description\"],None)\n\t\telse:\n\t\t\tpromocode = PromoCodeModel(data['promo_code'], data['promo_discount_per'], datetime_object, data[\"promo_wallet\"], data[\"promo_user\"], data[\"promo_description\"],data[\"promo_url\"])\n\t\ttry:\n\t\t\tpromocode.save_to_db()\n\t\texcept:\n\t\t\treturn {'data':{\"status\": False}}, 500\n\t\t\n\t\treturn {'data':{'status': True, 'promocode': promocode.json()}}, 201\n\n\t@swagger.operation(\n\t\tnotes='Get List of all Promo Code',\n\t\tnickname='GET'\n\t\t)\n\n\tdef get(self):\n\n\t\treturn {'data':{'promocode': [promo.json() for promo in PromoCodeModel.query.all()]}}\n\nclass PromoCodeEdit(Resource):\n\n\tparser = reqparse.RequestParser()\n\tparser.add_argument('promo_validity',\n\t\t\ttype = str,\n\t\t\trequired = True,\n\t\t\thelp = \"Promo Code Validity is Required.. 
Format: YYYY-MM-DD\"\n\t\t\t)\n\tparser.add_argument('promo_description',\n\t\t\ttype = str,\n\t\t\trequired = True,\n\t\t\thelp = \" Promo Code Desciption\")\n\n\t@swagger.operation(\n\t\tnotes='Edit a Promo Code Validity',\n\t\tnickname='PUT',\n\t\tparameters=[\n\t\t\t{\n\t\t\t\t\"name\": \"promo_id\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"int\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_validity\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"Date\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"name\": \"promo_description\",\n\t\t\t\t\"required\": True,\n\t\t\t\t\"dataType\": \"String\"\n\t\t\t}]\n\t\t)\n\n\tdef put(self, promo_id):\n\n\t\tpromocode = PromoCodeModel.find_by_promo_id(promo_id)\n\t\tdata = PromoCodeEdit.parser.parse_args()\n\t\tif promocode:\n\t\t\tpromocode.promo_validity = data['promo_validity']\n\t\t\tpromocode.promo_description = data['desciption']\n\t\t\tpromocode.save_to_db()\n\t\t\treturn {'data':{'status': True, 'promocode': promocode.json()}}\n\n\t\treturn {'data': {'status': False}}\n\n\t# @swagger.operation(\n\t# \tnotes='Delete a Promo Code',\n\t# \tnickname='DELETE',\n\t# \tparameters=[\n\t# \t\t{\n\t# \t\t\t\"name\": \"promo_id\",\n\t# \t\t\t\"required\": True,\n\t# \t\t\t\"dataType\": \"int\"\n\t# \t\t}]\n\t# \t)\n\n\t# def delete(self, promo_id):\n\n\t# \tpromocode = PromoCodeModel.find_by_promo_id(promo_id)\n\t# \tif promocode:\n\t# \t\tpromocode.delete_from_db()\n\t# \t\treturn {'data': {'status': True}}\n\t# \treturn {'data': {'status': False}}\n\nclass PromoCodeForAll(Resource):\n\n\n\t@swagger.operation(\n\t\tnotes = \"List All Promo Codes Valid For All Users\",\n\t\tnickname = 'GET')\n\n\tdef get(self):\n\t\tdate = datetime.now().date()\n\n\t\treturn {'data':{'promocode': [promo.json() for promo in PromoCodeModel.query.filter(PromoCodeModel.promo_validity >= date, PromoCodeModel.promo_user == 0)]}}\n\n\n\n\n\n 
\n\n","sub_path":"resources/promocode.py","file_name":"promocode.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"117594392","text":"import numpy as np\nimport math\n\n\n# helper class to load and store input data\nclass Data(object):\n def __init__(self, file, subsample_rate=1e-4):\n\n print('loading corpus...')\n # initialize data, vocab, negative, freq, and context\n self.init_vocab(file)\n print('init negative sampling..')\n self.init_negative()\n\n print('subsampling...')\n self.subsample(subsample_rate)\n\n print('initializing contexts...')\n self.init_context()\n\n def init_vocab(self, file):\n buf = open(file).read()\n self.tks = buf.split(' ')\n self.vocab = {}\n self.freq = []\n self.data = []\n\n for tk in self.tks:\n if len(tk) == 0:\n continue\n if tk not in self.vocab:\n index = len(self.vocab) // 2\n self.vocab[tk] = index\n self.vocab[index] = tk\n self.freq.append(0)\n wid = self.vocab[tk]\n self.data.append(wid)\n self.freq[wid] += 1\n\n def init_negative(self):\n # initialize list to select negative samples\n self.negative = []\n for i, count in enumerate(self.freq):\n if count < 5:\n continue\n n = int(math.pow(count * 1.0, 0.75))\n self.negative += [i for _ in range(n)]\n\n def subsample(self, t=1e-3):\n # initialize probability to remove frequent tokens\n subsampling = [0 for i in range(len(self.vocab))]\n corpus_size = float(len(self.data))\n\n for i, count in enumerate(self.freq):\n f = count / corpus_size\n keep_probability = (math.pow(t / f, 0.5)) + (t / f)\n subsampling[i] = keep_probability\n\n # remove based on subsampling\n i = 0\n delete = [False for i in range(len(self.data))]\n while i < len(self.data):\n if np.random.rand() > subsampling[self.data[i]]:\n self.freq[self.data[i]] -= 1\n delete[i] = True\n else:\n i += 1\n\n self.data[:] = [self.data[i] for i in range(len(self.data)) if not delete[i]]\n\n def init_context(self):\n # map vocab to position for context evaluation\n self.context = [[] for word in self.vocab]\n for i in range(len(self.data)):\n self.context[self.data[i]].append(i)\n def get_data(self):\n return self.data, 
self.negative, self.vocab, self.freq\n\ndef print_window(data, index, window_size=5):\n str = ''\n\n for i in range(2 * window_size + 1):\n curr = i - window_size + index\n if 0 <= curr < len(data.data):\n str += data.vocab[data.data[curr]] + ' '\n print(str)\n\n\n# loads a word embedding file into an embed matrix\ndef load_wordvec(file, vocab={}):\n print('loading word embeddings...')\n\n embed = np.array([None for i in range(len(vocab))])\n index = 0\n no_vocab = len(vocab) == 0\n with open(file, 'r') as f:\n for line in f:\n buf = line.split(' ')\n word = buf[0]\n vector = [float(f) for f in buf[1:-1]]\n if no_vocab:\n embed.append(vector)\n vocab[word] = index\n index += 1\n else:\n embed[vocab[word]] = np.array(vector)\n\n return vocab, embed\n\n\n# returns the avg windows of embedded contexts for the specific word\ndef get_contexts(word, data, embed, window_size=5):\n index = data.vocab[word]\n contexts = data.context[index]\n\n embed_contexts = np.empty(shape=(0, len(embed[0])))\n for c in contexts:\n window = []\n\n for i in range(window_size * 2 + 1):\n idx = c + i - window_size\n if i != window_size and 0 <= idx <= len(data.data):\n if embed[data.data[idx]] is not None:\n window.append(data.data[idx])\n context = np.average(embed[window], axis=0).reshape(1, len(embed[0]))\n embed_contexts = np.append(embed_contexts, context, axis=0)\n\n return embed_contexts\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"380386389","text":"from django.db import models\nfrom datetime import datetime\nfrom users.models import Adduser\n\n# Create your models here.\nclass Blogadd(models.Model):\n author = models.ForeignKey(to=Adduser,on_delete=models.CASCADE)\n title = models.CharField(max_length=100)\n blog = models.CharField(max_length=1000)\n date = models.DateField(default=datetime.now())\n\n def __str__(self):\n return f\"{self.author}\"\n","sub_path":"djangoproject/myproject/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"580410410","text":"#\n# @lc app=leetcode id=96 lang=python3\n#\n# [96] Unique Binary Search Trees\n#\nclass Solution:\n def numTrees(self, n: int) -> int:\n '''\n 卡特兰数\n 把 n = 0 时赋为1,因为空树也算一种二叉搜索树,那么 n = 1 时的情况可以看做是其左子树个数乘以右子树的个数,\n 左右子树都是空树,所以1乘1还是1。那么 n = 2 时,由于1和2都可以为根,分别算出来,再把它们加起来即可。\n n = 2 的情况可由下面式子算出(这里的 dp[i] 表示当有i个数字能组成的 BST 的个数):\n\n dp[2] = dp[0] * dp[1] (1为根的情况,则左子树一定不存在,右子树可以有一个数字)\n\n + dp[1] * dp[0] (2为根的情况,则左子树可以有一个数字,右子树一定不存在)\n\n 同理可写出 n = 3 的计算方法:\n\n dp[3] = dp[0] * dp[2] (1为根的情况,则左子树一定不存在,右子树可以有两个数字)\n\n + dp[1] * dp[1] (2为根的情况,则左右子树都可以各有一个数字)\n\n + dp[2] * dp[0] (3为根的情况,则左子树可以有两个数字,右子树一定不存在)\n '''\n if not n: return 1\n dp = [0]*(n+1)\n dp[0], dp[1] = 1, 1\n for i in range(2,n+1):\n for j in range(i):\n dp[i] += dp[j]*dp[i-j-1]\n return dp[-1]\n\n","sub_path":"96.unique-binary-search-trees.py","file_name":"96.unique-binary-search-trees.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"176597168","text":"import sys\nimport getopt\nimport logging\nfrom yt_concat.pipeline.pipeline import Pipeline\nfrom yt_concat.utils import Utils\nfrom yt_concat.pipeline.steps.preflight import Preflight\nfrom yt_concat.pipeline.steps.get_video_list import GetVideoList\nfrom yt_concat.pipeline.steps.initialize_yt import InitializeYT\nfrom yt_concat.pipeline.steps.search import Search\nfrom yt_concat.pipeline.steps.download_videos import DownloadVideos\nfrom yt_concat.pipeline.steps.edit_videos import EditVideos\nfrom yt_concat.pipeline.steps.postflight import Postflight\n\n\nCHANNEL_ID = 'UCKSVUHI9rbbkXhvAXK-2uxA'\n\n\ndef print_usage():\n print('python main.py OPTIONS')\n print('OPTIONS:')\n print('{:>6} {:<17} {}'.format('-c', '--channel_id', 'Channel ID of your target youtube channel'))\n print('{:>6} {:<17} {}'.format('-s', '--search_word', 'The word that you want to capture in videos'))\n print('{:>6} {:<17} {}'.format('-l', '--limit', 'The maximum number of capture videos in the output video'))\n print('{:>6} {:<17} {}'.format('-g', '--logging_level', 'The logging level shown on the CMD screen. 
'\n '[Fill a number only] '\n '[1:DEBUG, 2:INFO, 3:WARNING, 4:ERROR, 5:CRITICAL]'))\n print('{:<24} {}'.format('cleanup', 'Remove all downloaded videos'))\n print('{:<24} {}'.format('fast', 'Skip downloading video list and videos if exist'))\n\n\ndef command_line_arg():\n channel_id = CHANNEL_ID\n search_word = 'incredible'\n limit = 30\n logging_level = logging.DEBUG\n cleanup = False\n fast = False\n short_opt = 'hc:s:l:g:'\n long_opt = 'help channel_id= search_word= limit= logging_level= cleanup fast'.split()\n try:\n opts, args = getopt.getopt(sys.argv[1:], short_opt, long_opt)\n print(opts)\n except getopt.GetoptError:\n print_usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print_usage()\n sys.exit()\n elif opt in (\"-c\", \"--channel_id\"):\n channel_id = arg\n elif opt in (\"-s\", \"--search_word\"):\n search_word = arg\n elif opt in (\"-l\", \"--limit\"):\n limit = int(arg)\n elif opt in (\"-g\", \"--logging_level\"):\n if arg == '1':\n logging_level = logging.DEBUG\n elif arg == '2':\n logging_level = logging.INFO\n elif arg == '3':\n logging_level = logging.WARNING\n elif arg == '4':\n logging_level = logging.ERROR\n elif arg == '5':\n logging_level = logging.CRITICAL\n elif opt == '--cleanup':\n cleanup = True\n elif opt == '--fast':\n fast = True\n return channel_id, search_word, limit, logging_level, cleanup, fast\n\n\ndef config_logger(logging_level):\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s:%(asctime)s:%(message)s')\n file_handler = logging.FileHandler('yt_concat_logging.log')\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging_level)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n return logger\n\n\ndef main():\n channel_id, search_word, limit, logging_level, cleanup, fast = 
command_line_arg()\n inputs = {\n 'channel_id': channel_id,\n 'search_word': search_word,\n 'limit': limit,\n 'logging_level': logging_level,\n 'cleanup': cleanup,\n 'fast': fast,\n }\n\n steps = [\n Preflight(),\n GetVideoList(),\n InitializeYT(),\n Search(),\n DownloadVideos(),\n EditVideos(),\n Postflight(),\n ]\n\n logger = config_logger(logging_level)\n utils = Utils()\n p = Pipeline(steps)\n p.run(inputs, utils, logger)\n\n\nif __name__ == '__main__':\n main()","sub_path":"yt_concat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"482628563","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets, Qt\r\nimport sys\r\nimport time\r\nfrom winchat import *\r\n\r\n\r\nclass Main_1(object):\r\n\r\n def setupUi(self, Form):\r\n Form.setObjectName(\"Form\")\r\n Form.resize(275, 700)\r\n Form.setWindowFlags(QtCore.Qt.FramelessWindowHint)\r\n Form.setMinimumSize(QtCore.QSize(275, 700))\r\n Form.setMaximumSize(QtCore.QSize(275, 700))\r\n # font = QtGui.QFont()\r\n # font.setFamily(\"Jokerman\")\r\n # Form.setFont(font)\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(\"images/climb.jpg\"),\r\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n Form.setWindowIcon(icon)\r\n\r\n font1 = QtGui.QFont()\r\n font1.setFamily(\"Arial\")\r\n font1.setPointSize(14)\r\n font1.setBold(False)\r\n # font1.setWeight(10)\r\n self.label_char = QtWidgets.QLabel(Form)\r\n self.label_char.setGeometry(QtCore.QRect(0, 0, 80, 25))\r\n self.label_char.setText(\" iClimb\")\r\n self.label_char.setObjectName(\"label_char\")\r\n self.label_char.setFont(font1)\r\n\r\n font2 = QtGui.QFont()\r\n font2.setFamily(\"Gulim\")\r\n font2.setPointSize(18)\r\n font2.setBold(False)\r\n # font2.setWeight(10)\r\n font2.setUnderline(False)\r\n self.label_mini = QtWidgets.QLabel(Form)\r\n self.label_mini.setGeometry(QtCore.QRect(225, 0, 25, 25))\r\n self.label_mini.setText(\r\n \"-\")\r\n self.label_mini.linkActivated.connect(self.showMinimized)\r\n self.label_mini.setObjectName(\"label_mini\")\r\n self.label_mini.setFont(font2)\r\n self.label_close = QtWidgets.QLabel(Form)\r\n self.label_close.setGeometry(QtCore.QRect(250, 0, 25, 25))\r\n self.label_close.setText(\r\n \"×\")\r\n self.label_close.linkActivated.connect(self.close)\r\n self.label_close.setObjectName(\"label_close\")\r\n self.label_close.setFont(font2)\r\n\r\n self.graphicsView_profile = QtWidgets.QGraphicsView(Form)\r\n self.graphicsView_profile.setGeometry(QtCore.QRect(15, 35, 90, 90))\r\n 
self.graphicsView_profile.setObjectName(\"graphicsView_profile\")\r\n self.textBrowser_weather = QtWidgets.QTextBrowser(Form)\r\n self.textBrowser_weather.setGeometry(QtCore.QRect(120, 30, 140, 70))\r\n self.textBrowser_weather.setObjectName(\"textBrowser_weather\")\r\n self.textEdit_city = QtWidgets.QTextEdit(Form)\r\n self.textEdit_city.setGeometry(QtCore.QRect(120, 105, 140, 20))\r\n self.textEdit_city.setObjectName(\"textEdit_city\")\r\n\r\n self.tabWidget = QtWidgets.QTabWidget(Form)\r\n self.tabWidget.setGeometry(QtCore.QRect(0, 150, 275, 500))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Viner Hand ITC\")\r\n font.setPointSize(10)\r\n font.setBold(False)\r\n # font.setWeight(50)\r\n self.tabWidget.setFont(font)\r\n self.tabWidget.setObjectName(\"tabWidget\")\r\n\r\n self.tab_chatting = QtWidgets.QWidget()\r\n self.tab_chatting.setObjectName(\"tab_chatting\")\r\n self.listWidget_chatting = QtWidgets.QListWidget(self.tab_chatting)\r\n self.listWidget_chatting.setGeometry(QtCore.QRect(0, 0, 275, 500))\r\n self.listWidget_chatting.setIconSize(QtCore.QSize(70, 70))\r\n self.listWidget_chatting.setObjectName(\"listWidget_chatting\")\r\n item = QtWidgets.QListWidgetItem()\r\n self.listWidget_chatting.addItem(item)\r\n self.tabWidget.addTab(self.tab_chatting, \"\")\r\n\r\n self.tab_friends = QtWidgets.QWidget()\r\n self.tab_friends.setObjectName(\"tab_friends\")\r\n self.listWidget_friends = QtWidgets.QListWidget(self.tab_friends)\r\n self.listWidget_friends.setGeometry(QtCore.QRect(0, 0, 275, 500))\r\n self.listWidget_friends.setIconSize(QtCore.QSize(70, 70))\r\n self.listWidget_friends.setObjectName(\"listWidget_friends\")\r\n item = QtWidgets.QListWidgetItem()\r\n self.listWidget_friends.addItem(item)\r\n self.tabWidget.addTab(self.tab_friends, \"\")\r\n\r\n self.tab_groups = QtWidgets.QWidget()\r\n self.tab_groups.setObjectName(\"tab_groups\")\r\n self.listWidget_groups = QtWidgets.QListWidget(self.tab_groups)\r\n 
self.listWidget_groups.setGeometry(QtCore.QRect(0, 0, 275, 500))\r\n self.listWidget_groups.setIconSize(QtCore.QSize(50, 50))\r\n self.listWidget_groups.setObjectName(\"listWidget_groups\")\r\n item = QtWidgets.QListWidgetItem()\r\n self.listWidget_groups.addItem(item)\r\n self.tabWidget.addTab(self.tab_groups, \"\")\r\n\r\n self.tab_apps = QtWidgets.QWidget()\r\n self.tab_apps.setObjectName(\"tab_apps\")\r\n self.calendarWidget = QtWidgets.QCalendarWidget(self.tab_apps)\r\n self.calendarWidget.setGeometry(QtCore.QRect(0, 250, 271, 221))\r\n self.calendarWidget.setObjectName(\"calendarWidget\")\r\n self.label = QtWidgets.QLabel(self.tab_apps)\r\n self.label.setGeometry(QtCore.QRect(10, 20, 54, 12))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Small Fonts\")\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label.setFont(font)\r\n self.label.setTextFormat(QtCore.Qt.RichText)\r\n self.label.setObjectName(\"label\")\r\n self.label_2 = QtWidgets.QLabel(self.tab_apps)\r\n self.label_2.setGeometry(QtCore.QRect(10, 130, 54, 12))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Small Fonts\")\r\n font.setPointSize(10)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_2.setFont(font)\r\n self.label_2.setObjectName(\"label_2\")\r\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_3.setGeometry(QtCore.QRect(20, 50, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_3.setFont(font)\r\n self.pushButton_3.setObjectName(\"pushButton_3\")\r\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_4.setGeometry(QtCore.QRect(20, 160, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_4.setFont(font)\r\n self.pushButton_4.setObjectName(\"pushButton_4\")\r\n self.pushButton_5 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_5.setGeometry(QtCore.QRect(100, 50, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n 
self.pushButton_5.setFont(font)\r\n self.pushButton_5.setObjectName(\"pushButton_5\")\r\n self.pushButton_6 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_6.setGeometry(QtCore.QRect(180, 50, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_6.setFont(font)\r\n self.pushButton_6.setObjectName(\"pushButton_6\")\r\n self.pushButton_7 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_7.setGeometry(QtCore.QRect(100, 160, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_7.setFont(font)\r\n self.pushButton_7.setObjectName(\"pushButton_7\")\r\n self.pushButton_8 = QtWidgets.QPushButton(self.tab_apps)\r\n self.pushButton_8.setGeometry(QtCore.QRect(180, 160, 60, 40))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Papyrus\")\r\n self.pushButton_8.setFont(font)\r\n self.pushButton_8.setObjectName(\"pushButton_8\")\r\n self.tabWidget.addTab(self.tab_apps, \"\")\r\n\r\n self.pushButton_set = QtWidgets.QPushButton(Form)\r\n self.pushButton_set.setGeometry(QtCore.QRect(0, 675, 25, 25))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Arial\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n font.setKerning(False)\r\n self.pushButton_set.setFont(font)\r\n self.pushButton_set.setObjectName(\"pushButton_set\")\r\n self.pushButton_add = QtWidgets.QPushButton(Form)\r\n self.pushButton_add.setGeometry(QtCore.QRect(25, 675, 25, 25))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Arial\")\r\n font.setPointSize(18)\r\n font.setBold(False)\r\n font.setWeight(50)\r\n self.pushButton_add.setFont(font)\r\n self.pushButton_add.setAutoRepeatInterval(102)\r\n self.pushButton_add.setObjectName(\"pushButton_add\")\r\n self.lcdNumber_time = QtWidgets.QLCDNumber(Form)\r\n self.lcdNumber_time.setGeometry(QtCore.QRect(100, 675, 150, 25))\r\n self.lcdNumber_time.setObjectName(\"lcdNumber_time\")\r\n\r\n self.retranslateUi(Form)\r\n 
self.tabWidget.setCurrentIndex(0)\r\n QtCore.QMetaObject.connectSlotsByName(Form)\r\n\r\n def retranslateUi(self, Form):\r\n _translate = QtCore.QCoreApplication.translate\r\n Form.setWindowTitle(_translate(\"Form\", \"iClimb\"))\r\n __sortingEnabled = self.listWidget_chatting.isSortingEnabled()\r\n self.listWidget_chatting.setSortingEnabled(False)\r\n item = self.listWidget_chatting.item(0)\r\n item.setText(_translate(\"Form\", \"New Item\"))\r\n\r\n self.listWidget_chatting.setSortingEnabled(__sortingEnabled)\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(\r\n self.tab_chatting), _translate(\"Form\", \"Chatting\"))\r\n __sortingEnabled = self.listWidget_friends.isSortingEnabled()\r\n self.listWidget_friends.setSortingEnabled(False)\r\n item = self.listWidget_friends.item(0)\r\n item.setText(_translate(\"Form\", \"New Item\"))\r\n\r\n self.listWidget_friends.setSortingEnabled(__sortingEnabled)\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(\r\n self.tab_friends), _translate(\"Form\", \"Friends\"))\r\n __sortingEnabled = self.listWidget_groups.isSortingEnabled()\r\n self.listWidget_groups.setSortingEnabled(False)\r\n item = self.listWidget_groups.item(0)\r\n item.setText(_translate(\"Form\", \"New Item\"))\r\n\r\n self.listWidget_groups.setSortingEnabled(__sortingEnabled)\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(\r\n self.tab_groups), _translate(\"Form\", \"Groups\"))\r\n self.label.setText(_translate(\"Form\", \"TOOLS\"))\r\n self.label_2.setText(_translate(\"Form\", \"GAMES\"))\r\n self.pushButton_3.setText(_translate(\"Form\", \"BLOG\"))\r\n self.pushButton_4.setText(_translate(\"Form\", \"Pan\"))\r\n self.pushButton_5.setText(_translate(\"Form\", \"BLOG\"))\r\n self.pushButton_6.setText(_translate(\"Form\", \"BLOG\"))\r\n self.pushButton_7.setText(_translate(\"Form\", \"Pan\"))\r\n self.pushButton_8.setText(_translate(\"Form\", \"Pan\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(\r\n self.tab_apps), _translate(\"Form\", 
\"APPS\"))\r\n self.pushButton_set.setText(_translate(\"Form\", \"≡\"))\r\n self.pushButton_add.setText(_translate(\"Form\", \"+\"))\r\n\r\n\r\nclass MainWin(Main_1, QtWidgets.QWidget):\r\n\r\n def __init__(self, info_my, parent=None):\r\n super().__init__(parent)\r\n self.setupUi(self)\r\n self.info_my = info_my\r\n # self.start_m(self.info_my)\r\n self.chat = {}\r\n\r\n def mousePressEvent(self, event):\r\n if event.button() == QtCore.Qt.LeftButton:\r\n self.m_drag = True\r\n self.m_DragPosition = event.globalPos() - self.pos()\r\n event.accept()\r\n self.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))\r\n\r\n def mouseMoveEvent(self, QMouseEvent):\r\n if QtCore.Qt.LeftButton and self.m_drag:\r\n self.move(QMouseEvent.globalPos() - self.m_DragPosition)\r\n QMouseEvent.accept()\r\n\r\n def mouseReleaseEvent(self, QMouseEvent):\r\n self.m_drag = False\r\n self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))\r\n\r\n def start_m(self, info_my):\r\n for i in info_my[1]:\r\n item1 = QtWidgets.QListWidgetItem()\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(r\"images/\" + info_my[1][i] + \".ico\"),\r\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item1.setIcon(icon)\r\n item1.setText(i)\r\n self.listWidget_friends.addItem(item1)\r\n self.listWidget_friends.itemDoubleClicked.connect(self.chatting)\r\n for j in info_my[2]:\r\n item = QtWidgets.QListWidgetItem()\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(r\"images/群聊.jfif\"),\r\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item.setIcon(icon)\r\n item.setText(j)\r\n self.listWidget_groups.addItem(item)\r\n self.listWidget_groups.itemDoubleClicked.connect(self.chatting)\r\n\r\n def chatting(self, item):\r\n self.chat[item.text()] = ChatWin()\r\n self.chat[item.text()].show()\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n# app = QtWidgets.QApplication(sys.argv)\r\n\r\n # mainw.show()\r\n # 
sys.exit(app.exec_())\r\n","sub_path":"AID1806项目/聊天室2/client10-13/2018-9-25/winmain.py","file_name":"winmain.py","file_ext":"py","file_size_in_byte":13009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"194872870","text":"# count lines that match with regex\r\n\"\"\"import re\r\n\r\nfname = 'mbox.txt'\r\nhandler = open(fname)\r\n\r\nuser_regex = input('Enter a regular expression: ')\r\n\r\nnrgx = 0\r\nfor line in handler:\r\n if re.findall(f'{user_regex}', line):\r\n nrgx += 1\r\n\r\nprint(f'{fname} had {nrgx} lines that matched {user_regex}')\"\"\"\r\n\r\n#-----------------------------------------------\r\n# 2nd version with input already written\r\nimport re\r\n\r\nfname = 'mbox.txt'\r\nhandler = open(fname)\r\n\r\ninput = ['^Author', '^X-', 'java$']\r\n\r\nfor i in range(len(input)):\r\n nrgx = 0\r\n for line in handler:\r\n if re.findall(input[i], line):\r\n nrgx += 1\r\n print(f'{fname} had {nrgx} lines that matched {input[i]}')\r\n","sub_path":"ex_11_regex/ex_11_01.py","file_name":"ex_11_01.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"76986005","text":"#!/usr/bin/env python3\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Test client for Buggy Server.\")\nparser.add_argument(\"--buggy-name\", type=str, help=\"Which buggy name to use.\",\n default=\"Transistor\")\nparser.add_argument(\"--team-name\", type=str, help=\"Which team name to use.\",\n default=\"RoboBuggy\")\nparser.add_argument(\"--hostname\", type=str, help=\"Which hostname to use for the\"\n \" data server connection.\", default=\"localhost\")\nparser.add_argument(\"--port\", type=int,\n help=\"Which port to use for the data server connection.\",\n default=4242)\nparser.add_argument(\"--key\", type=str,\n help=\"Path to key to use for authentication to the server.\",\n default=\"\")\n\nparser.add_argument('--gui', dest='gui', action='store_true')\nparser.add_argument('--no-gui', dest='gui', action='store_false')\nparser.set_defaults(gui=True)\n\nparser.add_argument('--webcam', dest='webcam', action='store_true')\nparser.add_argument('--no-webcam', dest='webcam', action='store_false')\nparser.set_defaults(webcam=False)\nparser.add_argument('--camera', dest='camera', action='store_true')\nparser.add_argument('--no-camera', dest='camera', action='store_false')\nparser.set_defaults(camera=False)\nparser.add_argument('--imu', dest='imu', action='store_true')\nparser.add_argument('--no-imu', dest='imu', action='store_false')\nparser.set_defaults(imu=False)\nparser.add_argument('--gps', dest='gps', action='store_true')\nparser.add_argument('--no-gps', dest='gps', action='store_false')\nparser.set_defaults(gps=False)\nparser.add_argument('--status', dest='status', action='store_true')\nparser.add_argument('--no-status', dest='status', action='store_false')\nparser.set_defaults(status=False)\n\nimport logging\nimport math\nimport random\nimport sys\nimport time\n\nimport cv2\nimport numpy as np\nimport tornado\nfrom auth_client import AuthClient\nfrom protos.message_pb2 import DataMessage\nfrom 
protos.message_pb2 import ImuMessage\nfrom protos.message_pb2 import GpsMessage\nfrom protos.message_pb2 import LogMessage\nfrom packet import Packet\n\nwords = \"\"\"\nThere is a theory which states that if ever anyone discovers exactly what the\nUniverse is for and why it is here, it will instantly disappear and be replaced\nby something even more bizarre and inexplicable. There is another theory which\nstates that this has already happened.\n\"\"\".split()\n\n\n\nclass Client(AuthClient):\n\n def __init__(self, key, team_name, buggy_name, *args, **kwargs):\n super().__init__(key, team_name, buggy_name, *args, **kwargs)\n if not cl_args.webcam:\n self.camera = None\n self.image_color = np.zeros(3, np.uint8)\n else:\n try:\n self.camera = cv2.VideoCapture(0)\n except:\n pass\n # try:\n # # Uncomment this to switch to generated colors\n # raise Exception()\n # self.camera = cv2.VideoCapture(0)\n # except:\n # self.camera = None\n # self.image_color = np.zeros(3, np.uint8)\n\n # IMU initialization\n self.imu_start = time.time()\n self.imu_period = 1 # Every second, do a full revolution\n self.imu = ImuMessage()\n\n # GPS initialization\n self.course_points = np.array([\n (40.441760, -79.941561),\n (40.440168, -79.942258),\n (40.440078, -79.943041),\n (40.439090, -79.944125),\n (40.438665, -79.945648),\n (40.438878, -79.946421),\n (40.439735, -79.946818),\n (40.440723, -79.948255),\n (40.441507, -79.947225),\n (40.440437, -79.942140),\n ])\n\n self.point_distances = np.zeros(len(self.course_points) - 1)\n for i in range(len(self.course_points) - 1):\n self.point_distances[i] = np.linalg.norm(\n self.course_points[i] - self.course_points[(i + 1)])\n self.course_distances = np.cumsum(self.point_distances)\n self.course_distances = np.insert(self.course_distances, [0], [0])\n self.total_distance = self.course_distances[-1]\n\n self.gps_start = time.time()\n self.gps_period = 120 # seconds, slightly faster than the record.\n self.gps = GpsMessage()\n self.gps.lat = 
self.course_points[0][0]\n self.gps.long = self.course_points[0][1]\n self.gps_distance = 0\n\n def make_timestamp(self, timestamp):\n now = time.time()\n seconds = int(now)\n nanos = int((now - seconds) * 10**9)\n timestamp.seconds = seconds\n timestamp.nanos = nanos\n\n def make_gps_data(self, data):\n self.gps_distance += self.total_distance * (time.time() -\n self.gps_start) / self.gps_period\n self.gps_start = time.time()\n self.gps_distance %= self.total_distance\n point_left = np.searchsorted(self.course_distances, self.gps_distance)\n point_right = np.searchsorted(self.course_distances, self.gps_distance,\n \"right\")\n point_left -= point_left == point_right\n distance_from_prev_point = (self.gps_distance -\n self.course_distances[point_left])\n distance_to_next_point = self.point_distances[point_left]\n slope = self.course_points[point_right] - \\\n self.course_points[point_left]\n change = slope * (distance_from_prev_point / distance_to_next_point)\n point = self.course_points[point_left] + change\n self.gps.lat = point[0]\n self.gps.long = point[1]\n self.make_timestamp(self.gps.time)\n data.gps.CopyFrom(self.gps)\n data.data_type = DataMessage.GPS\n\n def make_status_data(self, data):\n # Just generate some fake text to make my life interesting.\n level = random.choice([\"DEBUG\", \"WARNING\", \"INFO\", \"ERROR\", \"FATAL\"])\n data.status.log_level = getattr(LogMessage, level)\n data.status.text = \" \".join(\n [random.choice(words).strip() for _ in range(10)])\n self.make_timestamp(data.status.time)\n data.data_type = DataMessage.STATUS\n\n def make_imu_data(self, data):\n time_diff = (time.time() - self.imu_start)\n self.imu.roll += ((time_diff / self.imu_period) * 2 * math.pi)\n self.imu.roll = self.imu.roll % (2 * math.pi)\n # self.imu.pitch += ((time_diff / self.imu_period) * 2 * math.pi)\n # self.imu.pitch = self.imu.pitch % (2 * math.pi)\n self.imu_start = time.time()\n # data.imu.roll = random.uniform(-1, 1)\n # data.imu.pitch = 
random.uniform(-2, 2)\n # data.imu.yaw = random.uniform(-3, 3)\n self.make_timestamp(self.imu.time)\n data.imu.CopyFrom(self.imu)\n data.data_type = DataMessage.IMU\n\n def make_camera_data(self, data):\n data.camera.width = 300\n data.camera.height = 300\n\n # Lets you switch between camera and generated imagery\n image = None\n if self.camera is not None:\n image = self.camera.read()[1]\n image = cv2.resize(image, (0, 0), fx=.5, fy=.5)\n data.camera.width = image.shape[1]\n data.camera.height = image.shape[0]\n\n if image is None:\n image = np.ones(\n (data.camera.height, data.camera.width, 3), np.uint8)\n image *= self.image_color\n to_add = np.array([0, 0, 0], np.uint8)\n to_add[random.randint(0, len(to_add) - 1)] = random.randint(0, 10)\n self.image_color += to_add\n\n data.camera.image = cv2.imencode(\".png\", image)[1].tostring()\n self.make_timestamp(data.camera.time)\n data.data_type = DataMessage.CAMERA\n if (cl_args.gui):\n cv2.imshow(\"TEST CLIENT\", image)\n cv2.waitKey(1)\n\n def async_send_stream(self, gen_fn):\n async def send():\n if self.stream_ok:\n try:\n data = DataMessage()\n data.robot_name = cl_args.buggy_name\n gen_fn(data)\n await self.stream.write(Packet.make_packet_from_bytes(\n data.SerializeToString()))\n except tornado.iostream.StreamClosedError as e:\n pass\n # logging.warning(\n # \"%s, unable to send message. 
[Hint: server may be down!]\", e)\n return send\n\n\nif __name__ == \"__main__\":\n global cl_args\n cl_args = parser.parse_args()\n logging.warning(cl_args)\n\n # Setup the client\n logging.basicConfig(level=logging.DEBUG)\n if (cl_args.key):\n with open(cl_args.key) as key_file:\n key = key_file.read().strip()\n client = Client(key, cl_args.team_name, cl_args.buggy_name,\n cl_args.hostname, cl_args.port)\n\n # Every second, try to authenticate and establish a connection.\n tornado.ioloop.PeriodicCallback(client.make_connection, 1000).start()\n # Periodically send various types of messages\n if cl_args.status:\n tornado.ioloop.PeriodicCallback(client.async_send_stream(\n client.make_status_data), 5).start() # 200 hz\n if cl_args.imu:\n tornado.ioloop.PeriodicCallback(client.async_send_stream(\n client.make_imu_data), 20).start() # 50 hz\n if cl_args.gps:\n tornado.ioloop.PeriodicCallback(client.async_send_stream(\n client.make_gps_data), 500).start() # 1 hz\n if cl_args.camera:\n tornado.ioloop.PeriodicCallback(client.async_send_stream(\n client.make_camera_data), 50).start() # 30 hz\n\n else:\n logging.error(\"Key is invalid! Quitting program.\")\n sys.exit(1)\n\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":9819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"563498790","text":"from django.urls import path\nfrom django.views.generic import RedirectView\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import views as auth_views\nfrom .views import ThreadBanCreate, UserPostBanCreate, TransgressionList, ReportedThreadList, ReportedUserPostList, ThreadReportDismiss, UserPostReportDismiss\n\nurlpatterns = [\n path('thread//ban/', ThreadBanCreate.as_view(), name='moderation_thread_ban'),\n path('post//ban/', UserPostBanCreate.as_view(), name='moderation_userpost_ban'),\n path('reports/threads/', ReportedThreadList.as_view(), name='moderation_thread_report_list'),\n path('reports/posts/', ReportedUserPostList.as_view(), name='moderation_userpost_report_list'),\n path('reports/dismiss/thread//', ThreadReportDismiss.as_view(), name='moderation_thread_report_dismiss'),\n path('reports/dismiss/post//', UserPostReportDismiss.as_view(), name='moderation_userpost_report_dismiss'), \n path('banned/', TransgressionList.as_view(), name='moderation_ban_page'),\n path('login/', auth_views.login, {'template_name': 'moderation/login.html'}, name='login'),\n path('logout/', auth_views.logout, {'template_name': 'moderation/logged_out.html', 'extra_context': {'form': AuthenticationForm}}, name='logout'),\n path('', RedirectView.as_view(pattern_name='dj-mod:moderation_ban_page', permanent=False))\n\n] \n","sub_path":"imagenaut/moderation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"537126648","text":"\"\"\"\npull_ocdid_data.py\n\nusage:\n python pull_ocdid_data.py\n\ncreates an SQLite3 database:\n ocd-id.sqlite.db\n\n\nThis script OCD-ID data from the opencivicdata/ocd-division-ids\nGitHub repo* and loads it into a SQLite3 database for inspection.\n\n *https://github.com/opencivicdata/ocd-division-ids/\n blob/master/identifiers/country-us.csv\n\n\nThe datasets:\n\n OCDEP 2: Open Civic Data Divisions\n This script reads the division IDs from the file\n _identifiers/country-us.csv_ in this repo,\n https://github.com/opencivicdata/ocd-division-ids/,\n and loads them into a sqlite database named _ocd-id.db_,\n under the table name `country_us`.\n \n In a second table, named `lookup`, the OCD-ID types are\n broken into columns to be joined to. The goal is to make\n a lookup table that maps from the column name and value in\n each state's voter file to the correct OCD-ID.\n \n The OCD-IDs are flobally unique identifiers for political divisions.\n Defined in:\n http://docs.opencivicdata.org/en/latest/proposals/0002.html\n Implemented in:\n https://github.com/opencivicdata/ocd-division-ids\n Identifier format: \n ocd-division/country:(/:)*\n\"\"\"\nfrom __future__ import print_function\nimport collections\n#import csv\nimport io\nimport os\nimport sqlite3\ntry:\n import urllib.request as request # Python 3\n from csv import DictReader\n\n def get_iostream(response):\n return io.StringIO(response.read().decode('utf-8'))\n\n def utf8(text):\n return text\n \nexcept ImportError:\n import urllib2 as request # Python 2\n import codecs\n from csv import DictReader #import csv\n\n def get_iostream(response):\n return io.BytesIO(response.read())\n\n def utf8(text):\n return unicode(text, 'utf-8')\n\n\nOCDID_US_DATA_URI = (\n 'https://github.com/opencivicdata/ocd-division-ids/'\n 'raw/master/identifiers/country-us.csv'\n)\n\nrunning_in_docker = os.path.exists('/.dockerenv')\nif running_in_docker:\n DATABASE_NAME = 
'/national-voter-file/data/ocd-id.sqlite.db'\nelse:\n DATABASE_NAME = 'ocd-id.sqlite.db'\n\n\nprint('Downloading from\\n', OCDID_US_DATA_URI)\nresponse = request.urlopen(OCDID_US_DATA_URI)\niostream = get_iostream(response)\nrdr = DictReader(iostream)\nfieldnames = rdr.fieldnames\nall_rows = [row for row in rdr]\nids = [row['id'] for row in all_rows]\n\n\n# First, put the whole dataset into sqlite3 as it is.\n# Create the table\nprint('Writing to', DATABASE_NAME)\nconn = sqlite3.connect(DATABASE_NAME)\nc = conn.cursor()\nc.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS\n country_us (ocdid TEXT PRIMARY KEY\n ,{},\n CONSTRAINT unique_ocdid UNIQUE (ocdid) ON CONFLICT IGNORE\n );\n \"\"\".format('\\n ,'.join(\n '{} TEXT'.format(k) for k in fieldnames[1:]))\n)\n\n# Populate the table\ninsertion = \"\"\" \n INSERT INTO country_us\n (ocdid\\n,{})\n VALUES ({})\n\"\"\".format('\\n,'.join(fieldnames[1:]), ', '.join(['?'] * len(fieldnames)))\nc.executemany(\n insertion,\n [tuple(utf8(row[f]) for f in fieldnames) for row in all_rows])\n\n\n# Now, get the hierarchy of region types by looking at the first column.\n# OCD-ID values are of the form \n# ocd-division/country:(/:)*\nsplits = [id.split('/') for id in ids]\ntypes = [(s[2], tuple(sub.split(':')[0] for sub in s[2:])) for s in splits if len(s) > 2]\ntype_hierarchy = {}\nfor locale, entry in types:\n if locale not in type_hierarchy:\n type_hierarchy[locale] = {}\n sub_type = type_hierarchy[locale]\n for type in entry:\n if type not in sub_type:\n sub_type[type] = {'COUNT': 0}\n sub_type[type]['COUNT'] += 1\n sub_type = sub_type[type]\n\n\n# -----------------------\n# Get all of the possible columns in the dataset\ntmp = set([sub.split(':')[0] for s in splits if len(s) > 2 for sub in s[2:]])\nall_possible_columns = sorted([s.lower() for s in tmp])\n\nc.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS\n lookup (\n ocdid TEXT PRIMARY KEY\n ,{},\n CONSTRAINT unique_lookup_ocid UNIQUE (ocdid) ON CONFLICT IGNORE\n );\n 
\"\"\".format('\\n ,'.join(\n '{} TEXT'.format(k) for k in all_possible_columns))\n)\n\ninsertion_template = \"\"\" \n INSERT INTO lookup\n (ocdid, {})\n VALUES ('{}', {})\n\"\"\"\n\nfor id, split in zip(ids, splits):\n if len(split) > 2:\n all_keys, all_vals = zip(*[utf8(s).split(':') for s in split[2:]])\n insertion = insertion_template.format(\n ', '.join(all_keys),\n id,\n ','.join(['?'] * len(all_vals))\n )\n c.execute(insertion, all_vals)\n\nconn.commit()\nconn.close()\n","sub_path":"src/python/utils/ocdidreporter/pull_ocdid_data.py","file_name":"pull_ocdid_data.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"610692725","text":"\n\n#--------------------------------------------------------------------------------------------\n# it will take a context free grammer and calculate its PDA step by step as the time goes by\n#--------------------------------------------------------------------------------------------\n\nimport sys, os, time\n# grammer class\nclass Grammer:\n\n\tdef __init__(self, grammer):\n\t\tself._grammer = grammer\n\t\tself._stack = [] # instance variable unique to each instance; our pda stack\n\n\t# printing the context free grammer\n\tdef show(self):\n\t\tif self.cf():\n\t\t\tprint(\"[+] Context Free Detected: {}\".format(self._grammer))\n\t\telse:\n\t\t\tprint(\"[!] Not A Context Free!\")\n\t\t\tsys.exit(1)\n\n\t# checking context free procedure\n\tdef cf(self):\n\t\ti = 0\n\t\twhile i < len(self._grammer):\n\t\t\tif len(self._grammer[i][1].split(\"->\")[0])>1:\n\t\t\t\treturn False\n\t\t\t\tbreak\n\t\t\ti = i + 1\n\t\treturn True\n\n\t# checking string derivation procedure\n\t# we know that every variable is reachable from S in the grammer; we assume its a normal grammer!\n\t# so if we derivate S till we found a match between our str and the derivation itself we can produce our considered string, cause there is a variables\n\t# which will end up to the lambda or a terminal.\n\tdef strdev(self, st):\n\t\t# checking if the string is a derivation from our grammer or not\n\t\t# it'll return True if there was any match or false for none\n\t\t# we derivate S till we find a match between our str and the derivation itself\n\t\ti = j = k = 0\n\t\t# cause every varible is reachable from S so we store the first rule which is S in our pdev stack to derivate it till the end of our string\n\t\t#pdev = [self._grammer[0][1]]\n\t\tterminal_or_lambda_rule = []\n\t\tif st[-1:]!=self._grammer[0][1].split('->')[1][-1:]:\n\t\t\tprint(\"[-] Can't Derivate, Last Character '{}' of Input String Detected!\".format(st[-1:]))\n\t\t\treturn False\n\t\t# finding varibles 
which end up to terminal or lambda in every rule\n\t\tfor i in range(len(self._grammer)):\n\t\t\tif len(self._grammer[i][1].split(\"->\")[1])==1 and self._grammer[i][1].split(\"->\")[1].islower():\n\t\t\t\tterminal_or_lambda_rule.append(self._grammer[i][1])\n\t\t\tif len(self._grammer[i][1].split(\"->\")[1])==1 and self._grammer[i][1].split(\"->\")[1]==\"^\":\n\t\t\t\tterminal_or_lambda_rule.append(self._grammer[i][1])\n\t\t\tif len(self._grammer[i][1].split(\"->\")[1])==2:\n\t\t\t\tterminal_or_lambda_rule.append(self._grammer[i][1])\n\t\t#print(terminal_or_lambda_rule)\n\t\t# every variable is reachable from S so we'll find those variables which is in terminal_or_lambda_rule stack in S and replace them with lambda or their terminals\n\t\t# and we'll do this till we find a match between our input string and S produced derivation \n\t\t# ISSUE: it can't derivate the grammer n times, kind of AI required to detect the number of derivations from our input string; doesn't accept \"aababab\"!!!!\n\t\t''' FIXED: we should derivate our devstr as the length as of our input string; \n\t\t\t\t for example if len(st) is equal to 7 then the length of our devstr must be 7\n\t\t\t\t (number of lambdas won't calculate, cause they will increase the length of our devstr) '''\n\t\tdevstr = self._grammer[0][1].split('->')[1]\n\t\tinput_string_length = len(st)\n\t\tdevstr_length = len(devstr)\n\t\t# we derivate the devstr till we find a match between the length of our input string and the length of devstr itself\n\t\twhile devstr_length!=input_string_length:\n\t\t\tfor k in range(len(terminal_or_lambda_rule)):\n\t\t\t\tif terminal_or_lambda_rule[k].split(\"->\")[0] in devstr and '^' not in terminal_or_lambda_rule[k].split(\"->\")[1]:\n\t\t\t\t\tif terminal_or_lambda_rule[k].split(\"->\")[0]=='S':\n\t\t\t\t\t\tdevstr = devstr.replace(terminal_or_lambda_rule[k].split(\"->\")[0], devstr)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdevstr = devstr.replace(terminal_or_lambda_rule[k].split(\"->\")[0], 
terminal_or_lambda_rule[k].split(\"->\")[1])\n\t\t\tdevstr_length = len(devstr)\n\t\t# we found the desired derivation for our input string, then we need to produce our string from our S derivation \n\t\tprint(devstr) # debug purposes\n\t\t# so we replace varibles in S with their terminals or lambdas\t\t\n\t\tfor k in range(len(terminal_or_lambda_rule)):\n\t\t\tif terminal_or_lambda_rule[k].split(\"->\")[0] in devstr:\n\t\t\t\tdevstr += \"*->\"+devstr.replace(terminal_or_lambda_rule[k].split(\"->\")[0], terminal_or_lambda_rule[k].split(\"->\")[1])\n\t\tprint(\"[+] Processing Derivation...\")\n\t\ttime.sleep(1)\n\t\tprint(devstr) # debug purposes\n\t\t# removing lambdas from our derivation string \n\t\tprint(\"[+] Removing Lambdas...\")\n\t\tif \"^\" in devstr:\n\t\t\tdevstr = devstr.replace(\"^\", '')\n\t\ttime.sleep(1)\n\t\tprint(devstr) # debug purposes\n\t\tif st in devstr:\n\t\t\tprint(\"[+] Derivation Found!\")\n\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"[-] Unable To Find Derivation!\")\n\t\t\treturn False\n\n\n\t# computing pda for our input string right after we derivated it from our grammer\n\tdef pda(self, st):\n\t\tpass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntry:\n\t# getting & storing the rules\n\tn = int(input(\"[+] Number Of Rules: \"))\n\ti = j = 0\n\tgr = []\n\twhile i < n:\n\t\tgr.append(input(\"[+] Enter Grammer: \"))\n\t\ti += 1\n\t# removing | from grammer\n\t# for j in range(len(gr)):\n\t# \tif '|' in gr[j].split(\"->\")[1]:\n\t# \t\tgr.append(gr[j].split(\"->\")[0]+\"->\"+gr[j].split(\"->\")[1].split(\"|\")[0])\n\t# \t\tgr.append(gr[j].split(\"->\")[0]+\"->\"+gr[j].split(\"->\")[1].split(\"|\")[1])\n\t# \t\tdel gr[j]\n\t# \telse:\n\t# \t\tpass\n\tgr = list(enumerate(gr))\n\t# initializing grammer instance\n\tg = Grammer(gr)\n\t# printing the considered rules\n\tg.show()\n\t# getting string from user\n\tst = input(\"[+] Input String: \")\n\t# string derivation process\n\tg.strdev(st)\n\t# if g.strdev(st):\n\t# \t# if we found our string in some 
derivation of S the there is a pda that accept this string/input\n\t# \tg.pda(st) \n\n\n# user input to stop the script like ctrl+C\nexcept KeyboardInterrupt:\n\tprint(\"\\n[*] Ctrl + C pressed\")\n\tsys.exit(1)","sub_path":"PDA.py","file_name":"PDA.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"273832161","text":"import asyncio\nimport json\nimport random\n\nimport aiohttp\nimport discord\nfrom discord.ext import commands\nfrom libneko.aggregates import Proxy\n\nfrom synth.utils import errors, extras\n\n\nclass API:\n \"\"\"Overall manager for all API's this bot is using.\"\"\"\n def __init__(self, bot: commands.Bot):\n self.session = aiohttp.ClientSession()\n self.colorapi = ColorAPI(self.session)\n self.pypi = PyPI(self.session)\n self.urbandictionary = UrbanDictionary(self.session)\n self.ksoft = Ksoft(self.session, bot.config.ksoft)\n self.nekobot = NekoBot(self.session)\n\nclass PyPI:\n \"\"\"Docs: https://warehouse.readthedocs.io/api-reference/json\"\"\"\n\n class PyPIResponse:\n def __init__(self, response):\n self._info = response[\"info\"]\n self.author = self._info[\"author\"]\n self.name = self._info[\"name\"]\n self.summary = self._info[\"summary\"] if self._info[\"summary\"] != \"UNKNOWN\" else None\n self.link = self._info[\"package_url\"]\n self.classifiers = self.__get_classifiers()\n self.releases = len(response[\"releases\"])\n if self._info['project_urls']:\n self.urls = \"\\n\".join([f\"[{url}]({self._info['project_urls'][url]})\" for url in self._info[\"project_urls\"] if self._info['project_urls'][url] != \"UNKNOWN\"])\n else:\n self.urls = None\n\n self.embed = self.__generate_embed()\n\n def __get_classifiers(self, limit=5):\n classifiers = self._info[\"classifiers\"]\n if classifiers:\n sliced = classifiers[:limit]\n if len(classifiers) > limit:\n sliced.append(f\"... 
and {len(classifiers) - len(sliced)} more.\")\n return \"\\n\".join(sliced) \n else:\n return \"No classifiers for this project.\" \n\n def __generate_embed(self):\n embed = discord.Embed(color=discord.Color.blue(), description=self.summary)\n embed.set_author(name=self.name, url=self.link)\n embed.set_thumbnail(url=\"https://i.imgur.com/obx8Wis.png\")\n embed.add_field(name=\"Classifiers\", value=self.classifiers, inline=False)\n embed.add_field(name=\"Links\", value=self.urls or \"No valid project urls.\", inline=False)\n embed.set_footer(text=f\"{self.releases} releases\")\n\n return embed\n\n def __init__(self, session: aiohttp.ClientSession):\n self.session = session\n self.base_url = \"https://pypi.org/pypi/\"\n\n async def get_project(self, project_name: str):\n project_name = project_name.lower()\n async with self.session.get(self.base_url + project_name + \"/json\") as resp:\n if resp.status != 200:\n raise commands.BadArgument\n \n return self.PyPIResponse(await resp.json())\n\nclass UrbanDictionary:\n \"\"\"Docs: None (lol)\"\"\"\n\n class UrbanDictionaryResponse:\n def __init__(self, response):\n self._list = response[\"list\"]\n if self._list:\n self._list = self._list[0]\n self.word = self._list[\"word\"]\n self.definition = self.__format(self._list[\"definition\"])\n self.example = self.__format(self._list[\"example\"])\n self.thumbs_up = self._list[\"thumbs_up\"]\n self.thumbs_down = self._list[\"thumbs_down\"]\n self.link = self._list[\"permalink\"]\n self.icon = \"https://i.imgur.com/35mpixh.png\"\n\n self.embed = self.__generate_embed()\n else:\n raise commands.BadArgument\n\n def __generate_embed(self) -> discord.Embed:\n embed = discord.Embed(color=discord.Color.orange())\n embed.set_author(name=f\"{self.word} [{self.thumbs_up} 👍 | {self.thumbs_down} 👎]\", icon_url=self.icon, url=self.link)\n embed.add_field(name=\"Definition\", value=self.definition, inline=False)\n embed.add_field(name=\"Example\", value=self.example, inline=False)\n\n return 
embed\n\n def __format(self, string: str):\n s = string.replace(\"[\", \"**\").replace(\"]\", \"**\").replace(\"`\", \"’\")\n return s if not len(s) >= 1024 else s[:1020] + \" ...\"\n\n def __init__(self, session: aiohttp.ClientSession):\n self.session = session\n self.base_url = \"http://api.urbandictionary.com/v0/define?term=\"\n\n async def get_term(self, term: str):\n term = term.lower()\n try:\n async with self.session.get(self.base_url + term) as resp:\n return self.UrbanDictionaryResponse(response=await resp.json())\n except:\n raise errors.APIError\n\nclass ColorAPI:\n \"\"\"Docs: https://www.thecolorapi.com/docs\"\"\"\n\n class ColorAPIResponse:\n def __init__(self, response):\n try:\n self.hex = response[\"hex\"][\"clean\"]\n self.rgb = response[\"rgb\"][\"value\"]\n self.hsl = response[\"hsl\"][\"value\"]\n self.hsv = response[\"hsv\"][\"value\"]\n self.cmyk = response[\"cmyk\"][\"value\"]\n self.xyz = response[\"XYZ\"][\"value\"]\n self.color = self.__format_color(response[\"rgb\"])\n self.name = response[\"name\"][\"value\"]\n self.link = \"https://www.thecolorapi.com/id?format=html&hex=\" + self.hex\n\n self.embed = self.__generate_embed()\n except:\n raise commands.BadArgument\n\n def __format_color(self, rgb):\n dummy = Dummy()\n dummy.r = rgb[\"r\"]\n dummy.g = rgb[\"g\"]\n dummy.b = rgb[\"b\"]\n\n return dummy\n\n def __generate_embed(self) -> discord.Embed:\n embed = discord.Embed(color=discord.Color.from_rgb(self.color.r, self.color.g, self.color.b))\n embed.set_author(name=self.name, url=self.link, icon_url=\"https://www.htmlcsscolor.com/preview/32x32/\" + self.hex + \".png\")\n embed.add_field(name=\"Hex\", value=f\"#{self.hex}\")\n embed.add_field(name=\"RGB\", value=self.rgb)\n embed.add_field(name=\"HSL\", value=self.hsl)\n embed.add_field(name=\"HSV\", value=self.hsv)\n embed.add_field(name=\"CMYK\", value=self.cmyk)\n embed.add_field(name=\"XYZ\", value=self.xyz)\n\n return embed\n\n def __init__(self, session: aiohttp.ClientSession):\n 
self.session = session\n self.base_url = \"https://www.thecolorapi.com/\"\n\n async def get_color(self, color):\n color = color.strip(\"#\")\n async with self.session.get(self.base_url + \"id?hex=\" + color) as resp:\n return self.ColorAPIResponse(await resp.json())\n\nclass Ksoft:\n def __init__(self, session: aiohttp.ClientSession, token):\n self.session = session\n self.token = token\n self.base_url = \"https://api.ksoft.si/\"\n\n async def __request(self, endpoint, **kwargs):\n async with self.session.get(self.base_url + endpoint, headers={\"Authorization\": self.token}, params=kwargs) as resp:\n proxy = Proxy(from_keys=(await resp.json()))\n if self.__validate(proxy):\n return proxy\n else:\n raise errors.KsoftError(message=proxy[\"message\"])\n\n def __validate(self, proxy):\n try:\n if proxy.code == 404:\n return False\n except:\n return True \n else:\n return True \n\n async def random_image(self, tag):\n \"\"\"Gets random image from the specified tag.\"\"\"\n return await self.__request(\"images/random-image\", tag=tag)\n\n async def tags(self):\n \"\"\"Retrieve the list of all available tags.\"\"\"\n return await self.__request(\"images/tags\")\n\n async def tag_search(self, search):\n \"\"\"Search for tags.\"\"\"\n return await self.__request(\"images/tags/\" + search)\n\n async def image_from_snowflake(self, snowflake):\n \"\"\"Retrieve image data.\"\"\"\n return await self.__request(\"images/image/\" + snowflake)\n\n async def random_meme(self):\n \"\"\"Retrieves a random meme from the cache. Source: reddit\"\"\"\n return await self.__request(\"images/random-meme\")\n\n async def random_wikihow(self):\n \"\"\"Retrieves weird images from WikiHow.\"\"\"\n return await self.__request(\"images/random-wikihow\")\n\n async def random_aww(self):\n \"\"\"Get random cute pictures, mostly animals.\"\"\"\n return await self.__request(\"images/random-aww\")\n\n async def random_nsfw(self):\n \"\"\"Retrieves random NSFW pics. 
(real life stuff)\"\"\"\n return await self.__request(\"images/random-nsfw\")\n\n async def random_image_from_subreddit(self, subreddit, remove_nsfw = True, span = \"day\"):\n \"\"\"Retrieve images from the specified subreddit.\"\"\"\n if span not in [\"hour\", \"day\", \"week\", \"month\", \"year\", \"all\"]:\n raise Ksoft(message=\"Span must be one of the following arguments: hour, day, week, month, year, all\")\n \n return await self.__request(\"images/rand-reddit/\" + subreddit, remove_nsfw=remove_nsfw, span=span)\n\n async def get_ban(self, user):\n \"\"\"Get more information about a ban.\"\"\"\n return await self.__request(\"bans/info\", user=user)\n\n async def check_ban(self, user):\n \"\"\"Simple way to check if the user is banned.\"\"\"\n return await self.__request(\"bans/check\", user=user)\n\n async def get_bans(self):\n \"\"\"Pagination of bans, you can request up to 1000 records per page, default is 20.\"\"\"\n return await self.__request(\"bans/list\")\n\n async def get_ban_updates(self, timestamp):\n \"\"\"Gets updates from the previous update.\"\"\"\n return await self.__request(\"bans/updated\", timestamp=timestamp)\n\n async def get_map(self, query):\n \"\"\"You can get coordinates and more information about the searched location, if needed image of the area is generated.\"\"\"\n return await self.__request(\"kumo/gis\", q=query)\n\n async def weather(self, query, report_type = \"currently\"):\n \"\"\"Gets weather from a location.\"\"\"\n report_types = [\"currently\", \"minutely\", \"hourly\", \"daily\"]\n if report_type not in report_types:\n raise errors.KsoftError(message=f\"Report type must be on the the following arguments: {', '.join(report_types)}\")\n\n return await self.__request(\"kumo/weather/\" + report_type, q=query)\n\n async def weather_advanced(self):\n \"\"\"Gets weather by coordinates, this endpoint is faster than weather - easy, because it doesn't need to lookup the location.\n https://api.ksoft.si/kumo/weather/\"\"\"\n raise 
NotImplementedError\n\n async def geoip(self, ip):\n \"\"\"Gets location data from the IP address.\"\"\"\n return await self.__request(\"kumo/geoip\", ip=ip)\n\n #async def currency(self, from, to, value):\n # \"\"\"Convert currency\"\"\"\n # \"\"\"https://en.wikipedia.org/wiki/ISO_4217#Active_codes\"\"\"\n # return await self.__request(\"kumo/currency\", from=from, to=to, value=value)\n\n async def lyrics(self, query):\n \"\"\"Searches for lyrics and returns a list of results.\"\"\"\n return await self.__request(\"lyrics/search\", q=query)\n\n async def artist(self, id):\n \"\"\"Retrieves all albums and songs by that artist.\"\"\"\n return await self.__request(\"lyrics/artist/\" + id)\n\n async def album(self, id):\n \"\"\"Retrieves artist name and all tracks in the album.\"\"\"\n return await self.__request(\"lyrics/album/\" + id)\n \n async def track(self, id):\n \"\"\"Get info about a song.\"\"\"\n return await self.__request(\"lyrics/track/\" + id)\n\nclass NekoBot:\n \"\"\"Docs: https://docs.nekobot.xyz\"\"\"\n\n class NekoBotResponse:\n def __init__(self, response):\n self.image = response[\"message\"]\n\n self.embed = self.__generate_embed()\n\n def __generate_embed(self):\n embed = discord.Embed(color=discord.Color.greyple())\n embed.set_image(url=self.image)\n\n return embed\n\n def __init__(self, session: aiohttp.ClientSession):\n self.session = session\n self.base_url = \"https://nekobot.xyz/api/\"\n\n async def __request(self, **kwargs):\n async with self.session.get(self.base_url + \"imagegen\", params=kwargs) as resp:\n return self.NekoBotResponse(await resp.json())\n\n async def threats(self, url: str):\n return await self.__request(type=\"threats\", url=url)\n\n async def baguette(self, url: str):\n return await self.__request(type=\"baguette\", url=url)\n\n async def clyde(self, text: str):\n return await self.__request(type=\"clyde\", text=text)\n\n async def ship(self, user1: str, user2: str):\n return await self.__request(type=\"ship\", 
user1=user1, user2=user2)\n\n async def captcha(self, url: str, username: str):\n return await self.__request(type=\"captcha\", url=url, username=username)\n\n async def whowouldwin(self, user1: str, user2: str):\n return await self.__request(type=\"whowouldwin\", user1=user1, user2=user2)\n\n async def changemymind(self, text: str):\n return await self.__request(type=\"changemymind\", text=text) \n\n async def jpeg(self, url: str):\n return await self.__request(type=\"jpeg\", url=url)\n \n async def lolice(self, url: str):\n return await self.__request(type=\"lolice\", url=url)\n\n async def kannagen(self, text: str):\n return await self.__request(type=\"kannagen\", text=text)\n\n async def iphonex(self, url: str):\n return await self.__request(type=\"iphonex\", url=url)\n\n async def kms(self, url: str):\n return await self.__request(type=\"kms\", url=url)\n\n async def animeface(self, image: str):\n return await self.__request(type=\"animeface\", image=image)\n\n async def awooify(self, url: str):\n return await self.__request(type=\"awooify\", url=url)\n\n async def trap(self, name: str, author: str, image: str):\n return await self.__request(type=\"trap\", name=name, author=author, image=image) \n\n async def nichijou(self, text: str):\n return await self.__request(type=\"nichijou\", text=text)\n\n async def trumptweet(self, text: str):\n return await self.__request(type=\"trumptweet\", text=text)\n\n async def tweet(self, username: str, text: str):\n return await self.__request(type=\"tweet\", username=username, text=text)\n\n async def kidnap(self, image: str):\n return await self.__request(type=\"kidnap\", image=image)\n\n async def deepfry(self, image: str):\n return await self.__request(type=\"deepfry\", image=image)\n\n async def blurpify(self, image: str):\n return await self.__request(type=\"blurpify\", image=image)\n\n async def phcomment(self, image: str, text: str, username: str):\n return await self.__request(type=\"phcomment\", image=image, 
text=text, username=username)\n\n async def magik(self, image: str):\n return await self.__request(type=\"magik\", image=image, intensity=random.randint(0, 10))\n\n async def clickforhentai(self, image: str, fontsize: int):\n return await self.__request(type=\"clickforhentai\", image=image, fontsize=fontsize)\n\n async def trash(self, url: str):\n return await self.__request(type=\"trash\", url=url)\n\nclass Dummy:\n pass\n","sub_path":"synth/utils/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":15309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"75218567","text":"import markdown\nfrom django.contrib.syndication.views import Feed\nfrom django.core.paginator import Paginator, PageNotAnInteger, InvalidPage\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect, reverse\nfrom .models import Paper, Tag, Comment, Category, User\n\n\n# Create your views here.\ndef render_page(request, papers=None, html_path='Index/index.html', paper=None):\n \"\"\"\n 根据所给数据渲染页面\n :param request: url请求包括POST, 和GET\n :param papers: 多篇文章\n :param html_path: html页面路径\n :param paper: 单片文章 --- 详情页\n :return: render(something)\n \"\"\"\n if papers:\n limit = 3 # 按每页4条分页\n page_number = 3 # 每页页码数量\n paginator = Paginator(papers, limit)\n if request.method == \"GET\":\n # 获取 url 后面的 page 参数的值, 首页不显示 page 参数, 默认值是 1\n page = request.GET.get('page')\n try:\n papers = paginator.page(page)\n # 注意捕获异常\n except PageNotAnInteger:\n # 如果请求的页数不是整数, 返回第一页。\n papers = paginator.page(1)\n except InvalidPage:\n # 如果请求的页数不存在, 重定向页面\n return HttpResponse('找不到页面的内容')\n\n finally:\n # 生成当前页页码范围\n\n if papers.number+page_number < papers.paginator.num_pages:\n page_range = range(papers.number, papers.number+page_number)\n else:\n page_range = range(papers.number, papers.paginator.num_pages)\n\n dates = Paper.objects.dates('date', 'month')[0:3]\n tags = Tag.objects.all()\n categories = Category.objects.all()\n all_paper = Paper.objects.all()\n latest_papers = all_paper.order_by('-date')\n latest_papers = latest_papers[0:4]\n return render(request, html_path,\n {\n 'papers': papers,\n 'dates': dates,\n 'tags': tags,\n 'latest_papers': latest_papers,\n 'categories': categories,\n 'username': request.session.get('username'),\n 'paper': paper,\n 'page_range': page_range\n })\n\n\ndef detail(request, paper_id):\n if request.method == 'GET':\n paper = Paper.objects.get(pk=paper_id)\n paper.reading += 1\n paper.save()\n return render_page(request, paper=paper, html_path='blog/single.html')\n\n # comment\n elif 
request.method == 'POST':\n comment_ = request.POST.get('comment')\n paper = Paper.objects.get(pk=paper_id)\n # TODO 这里的 user 要改成当前登录用户\n user = request.session.get('username')\n user = User.objects.get(name=user)\n comment = Comment(content=comment_, paper=paper, user=user)\n comment.save()\n return redirect(to=reverse('blog:detail', args=paper_id))\n\n\ndef write(request):\n if request.method == 'GET':\n return render_page(request, html_path='blog/write.html')\n\n elif request.method == 'POST':\n paper = Paper()\n paper.name = request.POST.get('name')\n paper.user = User.objects.get(name=request.session['username'])\n paper.category = Category.objects.get(pk=request.POST.get('category'))\n paper.save()\n # paper.tag_set = tags 不需要save就可以添加多对多字段\n for tag_id in request.POST.getlist('tag'):\n paper.tag.add(tag_id)\n\n paper.content = request.POST.get('content')\n # markdown渲染\n md = markdown.Markdown(extensions=[\n 'markdown.extensions.extra',\n 'markdown.extensions.codehilite',\n 'markdown.extensions.toc',\n ])\n paper.content = md.convert(paper.content)\n paper.toc = md.toc\n paper.save()\n return redirect(to=reverse('blog:detail', args=[paper.id]))\n\n\ndef category(request, category_id):\n category_ = Category.objects.get(pk=category_id)\n papers = category_.paper_set.all()\n return render_page(request, papers=papers)\n\n\ndef tag(request, tag_id):\n category_ = Category.objects.get(pk=tag_id)\n papers = category_.paper_set.all()\n return render_page(request, papers=papers)\n\n\ndef date(request, date_):\n year = int(date_[0:4])\n month = int(date_[5:7])\n papers = Paper.objects.filter(date__year=year).filter(date__month=month).all()\n return render_page(request, papers=papers)\n\n\nclass RSSFeed(Feed):\n title = \"RSS feed - article\"\n link = \"/\"\n description = \"RSS feed - blog posts\"\n\n def items(self):\n return Paper.objects.order_by('-date')\n\n def item_title(self, item):\n return item.name\n\n def item_pubdate(self, item):\n return item.date\n\n 
def item_description(self, item):\n return item.toc\n\n def item_link(self, item):\n return reverse('blog:detail', args=(item.id,))\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"93225762","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\"\"\"This file is part of the django ERP project.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\n__author__ = 'Emanuele Bertoldi '\n__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'\n__version__ = '0.0.5'\n\nfrom django.test import TestCase\nfrom django.contrib.auth import get_user_model\n\nfrom ..models import *\nfrom ..utils import *\n\nclass FakeModel():\n pk = 5\n\nclass CreateBookmarksUtilTestCase(TestCase):\n def test_create_bookmarks_for_user(self):\n \"\"\"Tests creating bookmarks for the given user instance.\n \"\"\"\n u1, n = get_user_model().objects.get_or_create(username=\"u1\")\n m, n = create_bookmarks(u1)\n \n self.assertTrue(isinstance(m, Menu))\n self.assertEqual(m.slug, \"user_%d_bookmarks\" % u1.pk)\n # NOTE: the bookmark menu was already created by the signal handler.\n self.assertEqual(n, False)\n \n def test_create_bookmarks_for_any_model(self):\n \"\"\"Tests creating bookmarks for a generic model instance.\n \"\"\" \n fm = FakeModel()\n m, n = create_bookmarks(fm)\n \n self.assertTrue(isinstance(m, Menu))\n self.assertEqual(m.slug, \"fakemodel_5_bookmarks\")\n self.assertEqual(n, True)\n\nclass DeleteBookmarksUtilTestCase(TestCase):\n def test_delete_bookmarks_for_user(self):\n \"\"\"Tests deleting bookmarks of the given user instance.\n \"\"\"\n u1, n = get_user_model().objects.get_or_create(username=\"u1\")\n m, n = create_bookmarks(u1)\n \n self.assertTrue(isinstance(m, Menu))\n 
self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(u1)).exists(),\n True\n )\n \n delete_bookmarks(u1)\n\n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(u1)).exists(),\n False\n )\n \n def test_delete_bookmarks_for_any_model(self):\n \"\"\"Tests deleting bookmarks of a generic model instance.\n \"\"\" \n fm = FakeModel()\n m, n = create_bookmarks(fm)\n \n self.assertTrue(isinstance(m, Menu))\n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists(),\n True\n )\n \n delete_bookmarks(fm)\n\n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists(),\n False\n )\n \n def test_delete_bookmarks_without_bookmarks(self):\n \"\"\"Tests calling \"delete_bookmarks\" on an instance without bookmarks.\n \"\"\" \n fm = FakeModel()\n \n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists(),\n False\n )\n \n delete_bookmarks(fm)\n \n self.assertEqual(\n Menu.objects.filter(slug=get_bookmarks_slug_for(fm)).exists(),\n False\n )\n \nclass GetBookmarksForUtilTestCase(TestCase):\n def test_bookmarks_for_user(self):\n \"\"\"Tests retrieving bookmark list owned by user with a given username.\n \"\"\" \n u1, n = get_user_model().objects.get_or_create(username=\"u1\")\n \n self.assertTrue(n)\n \n bookmarks = Menu.objects.get(slug=get_bookmarks_slug_for(u1))\n \n self.assertEqual(get_bookmarks_for(u1.username), bookmarks)\n \nclass GetUserOfUtilTestCase(TestCase): \n def test_user_of_bookmarks(self):\n \"\"\"Tests retrieving the user of bookmarks identified by the given slug.\n \"\"\" \n u1, n = get_user_model().objects.get_or_create(username=\"u1\")\n bookmarks = Menu.objects.get(slug=\"user_1_bookmarks\")\n \n self.assertEqual(get_user_of(bookmarks.slug), u1)\n \nclass CreateDetailNavigationTestCase(TestCase):\n def test_create_detail_navigation(self):\n \"\"\"Tests creating a detail view navigation menu.\n \"\"\"\n m, n = create_detail_navigation(FakeModel)\n \n 
self.assertEqual(m.slug, \"fakemodel_detail_navigation\")\n self.assertEqual(m.description, \"Fakemodel navigation\")\n \nclass CreateDetailActionsTestCase(TestCase):\n def test_create_detail_actions(self):\n \"\"\"Tests creating a detail view action menu.\n \"\"\"\n m, n = create_detail_actions(FakeModel)\n \n self.assertEqual(m.slug, \"fakemodel_detail_actions\")\n self.assertEqual(m.description, \"Fakemodel actions\")\n \nclass CreateListActionsTestCase(TestCase):\n def test_create_list_actions(self):\n \"\"\"Tests creating a list view action menu.\n \"\"\"\n m, n = create_list_actions(FakeModel)\n \n self.assertEqual(m.slug, \"fakemodel_list_actions\")\n self.assertEqual(m.description, \"Fakemodel list actions\")\n","sub_path":"djangoerp/menus/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"517969335","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cal', '0042_auto_20160925_0730'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='gcalendar',\n name='color_index',\n field=models.CharField(default='10', max_length=10, choices=[(b'1', b'#a4bdfc'), (b'2', b'#7ae7bf'), (b'3', b'#dbadff'), (b'4', b'#ff887c'), (b'5', b'#fbd75b'), (b'6', b'#ffb878'), (b'7', b'#46d6db'), (b'8', b'#e1e1e1'), (b'9', b'#5484ed'), (b'10', b'#51b749'), (b'11', b'#dc2127')]),\n preserve_default=False,\n ),\n ]\n","sub_path":"cal/cal/migrations/0043_gcalendar_color_index.py","file_name":"0043_gcalendar_color_index.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"150097728","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This module creates the postgres database\n\nfrom troposphere import Parameter, Ref, GetAtt, Tags, Join, Output\nfrom troposphere.rds import DBInstance, DBSubnetGroup\nfrom troposphere.ec2 import SecurityGroup, SecurityGroupRule\n\nimport config as cfn\nfrom config import template, CLOUDENV, CLOUDNAME, DEFAULT_ROUTE\n\ndef emit_configuration():\n vpc = cfn.vpcs[0]\n region = Ref(\"AWS::Region\")\n\n dbname = template.add_parameter(\n Parameter(\n \"RDSDatabaseInstanceName\",\n Default=\"reporting{0}\".format(CLOUDENV),\n Description=\"Postgres Instance Name\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"63\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=\"Must begin with a letter and contain only alphanumeric characters\"\n )\n )\n\n dbuser = template.add_parameter(\n Parameter(\n \"RDSDatabaseUser\",\n Default=\"sa\",\n Description=\"The database admin account username\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"63\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=\"Must being with a letter and be alphanumeric\"\n )\n )\n\n dbpassword = template.add_parameter(\n Parameter(\n \"RDSDatabasePassword\",\n NoEcho=True,\n Description=\"The database admin account password\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"41\",\n AllowedPattern=\"[a-zA-Z0-9]*\",\n ConstraintDescription=\"Must contain only alphanumeric characters.\",\n Default=\"LeafLeaf123\"\n )\n )\n\n dbclass = template.add_parameter(\n Parameter(\n \"RDSInstanceClass\",\n Default=\"db.t2.medium\",\n Description=\"Database instance size\",\n Type=\"String\",\n AllowedValues=[\n \"db.t2.small\", \"db.t2.medium\", \"db.m3.medium\", \"db.m3.large\",\n \"db.m3.xlarge\", \"db.m3.2xlarge\", \"db.r3.large\", \"db.r3.xlarge\",\n \"db.r3.2xlarge\", \"db.r3.4xlarge\", \"db.r3.8xlarge\"\n ]\n )\n )\n\n allocated_storage = template.add_parameter(\n Parameter(\n 
\"RDSAllocatedStorage\",\n Default=\"100\",\n Description=\"The size of the Postgres Database (GB)\",\n Type=\"Number\",\n MinValue=\"5\",\n MaxValue=\"512\",\n ConstraintDescription=\"Must be between 5 and 512 GB\"\n )\n )\n\n db_subnet_group = template.add_resource(\n DBSubnetGroup(\n \"RDSSubnetGroup\",\n DBSubnetGroupDescription=\"Subnets available for RDS in {0}\".format(CLOUDNAME),\n SubnetIds=[Ref(sn) for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)],\n DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)]\n )\n )\n\n ingress_rules = [\n SecurityGroupRule(\n IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1]\n ) for p in [('tcp', 5432)]]\n\n security_group = template.add_resource(\n SecurityGroup(\n \"RDSDatabaseSecurityGroup\",\n GroupDescription=\"Security group for Postgres Instances\",\n VpcId=Ref(vpc),\n SecurityGroupIngress=ingress_rules,\n DependsOn=vpc.title\n )\n )\n\n database = template.add_resource(\n DBInstance(\n \"RDSPostgresInstance\",\n DBInstanceIdentifier=Ref(dbname),\n AllocatedStorage=Ref(allocated_storage),\n DBInstanceClass=Ref(dbclass),\n Engine=\"postgres\",\n EngineVersion=\"9.3.6\",\n MasterUsername=Ref(dbuser),\n MasterUserPassword=Ref(dbpassword),\n DBSubnetGroupName=Ref(db_subnet_group),\n VPCSecurityGroups=[Ref(security_group)],\n DependsOn=[sn.title for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)]\n )\n )\n\n template.add_output(\n Output(\n \"ConnectionString\",\n Description=\"JDBC connection string for Postgres\",\n Value=Join(\"\", [\n GetAtt(\"RDSPostgresInstance\", \"Endpoint.Address\"),\n GetAtt(\"RDSPostgresInstance\", \"Endpoint.Port\")\n ])\n )\n )\n","sub_path":"components/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"303095065","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport sys\n# import os\n# sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nsys.path.append('D:\\\\WorkSpace\\\\question-bank')\n# sys.path.append('/var/www/question-bank.manual')\n\nimport codecs\nimport json\nfrom collections import OrderedDict\nfrom argparse import ArgumentParser\n\n\ndef arg_parser():\n parser = ArgumentParser(prog=__file__)\n parser.add_argument('-sc', '--schemaFile', type=str,\n metavar='XXX.json', required=True,\n help='json file of schemas to update.')\n parser.add_argument('-u', '--usage', type=str, default=None,\n metavar='USAGE', required=True,\n help='schema usage')\n parser.add_argument('-sid', '--subject_id', type=int, default=103,\n metavar='[101|102|103]',\n help='subject_id, default: 103')\n parser.add_argument('-sn', '--schema_name', type=str, default=None,\n metavar='SCHEMA_NAME',\n help='only update this schema.')\n return parser\n\n\ndef update_schema(fname, usage, subject_id, schema_name_to_update):\n if not StoneSchema.objects(usage=usage, deleted_at=None):\n choice = raw_input('\\nusage<{}> does not exist yet, do you wanna add it?\\n\\t'.format(usage))\n if choice.lower() not in ['y', 'yes']:\n sys.exit()\n schemas = OrderedDict()\n\n with codecs.open(fname, mode='r', encoding='utf-8') as f:\n data = json.load(f, object_pairs_hook=OrderedDict)\n schemas.update(OrderedDict(data))\n # print(json.dumps(schemas, indent=4))\n if schema_name_to_update:\n json_schema = schemas.get(schema_name_to_update)\n if json_schema:\n save_schema(schema_name_to_update, subject_id, usage, json_schema)\n else:\n print('schema_name={} NOT found!'.format(schema_name_to_update))\n else:\n for schema_name, json_schema in schemas.items():\n save_schema(schema_name, subject_id, usage, json_schema)\n\n\ndef save_schema(schema_name, subject_id, usage, json_schema):\n schema = 
StoneSchema.get_by_name(schema_name)\n if not schema:\n schema = StoneSchema()\n schema.subject_id = subject_id\n schema.name = schema_name\n schema.name_display = json_schema.get('title', schema_name)\n schema.usage = usage\n schema.json_schema = json.dumps(json_schema, ensure_ascii=False)\n # print(schema.to_mongo())\n try:\n schema.save()\n except BankValidationError as e:\n print(False, e.error_message())\n\n\nif __name__ == '__main__':\n args = arg_parser().parse_args()\n # print(args)\n from errors import BankValidationError\n from models.stone_models import StoneSchema\n update_schema(args.schemaFile, args.usage, args.subject_id, args.schema_name)\n","sub_path":"schema_manager/update_schema.py","file_name":"update_schema.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"109633921","text":"#!/usr/bin/python\n\nimport logging\nimport re\nimport defusedxml.ElementTree as ET #use hardened xml implementation because reading unknown xml document\n\nlogger = logging.getLogger('opf')\n\n\nDC11_NS = 'http://purl.org/dc/elements/1.1/'\nOPF2_NS = 'http://www.idpf.org/2007/opf'\nAPP_NAME = \"opf\"\n\n\ndef opf(name):\n return '{%s}%s' % (OPF2_NS, name)\n\n\ndef dc(name):\n return '{%s}%s' % (DC11_NS, name)\n\n\nclass Metadata:\n def __init__(self):\n self.title = \"\"\n self.authors = []\n self.series = \"\"\n self.series_index = None\n self.title_sort = \"\"\n self.language = \"en\"\n self.tags = []\n self.publication_year = None\n\n logger.info(\"opf inititalized\")\n\n @classmethod\n def from_file(cls, filename):\n def clean_string(string):\n string = string.strip()\n string = re.sub(\" +\", \" \", string)\n return string\n\n tree = ET.parse(filename)\n root = tree.getroot()\n result = cls()\n for child in root:\n if child.tag.endswith('metadata'):\n for meta in child:\n logger.debug(\"%s %s %s\", meta.tag, meta.attrib, meta.text)\n if meta.text:\n text = clean_string(meta.text)\n if meta.tag.endswith('title'):\n result.title = text\n elif meta.tag.endswith('language'):\n result.language = text\n elif meta.tag.endswith('creator'):\n result.authors.append(text)\n elif meta.tag.endswith('subject'):\n result.tags.append(text)\n elif meta.tag.endswith('date'):\n year_match = re.match(\"([0-9]{4}).*\", text)\n if year_match:\n result.publication_year = int(year_match.group(1))\n elif meta.tag.endswith('meta'):\n attrib = meta.attrib['name']\n if attrib == 'calibre:title_sort':\n result.title_sort = clean_string(meta.attrib['content'])\n elif attrib == 'calibre:series':\n result.series = clean_string(meta.attrib['content'])\n elif attrib == 'calibre:series_index':\n result.series_index = clean_string(meta.attrib['content'])\n\n return result\n\n def __str__(self):\n series_text = \"\"\n if self.series_index:\n series_text = \" Part 
%d of series %s\" % (self.series_index, self.series)\n\n return \"Metadata: %s by %s Language: %s Tags: %s\" % (\n self.title, \"; \".join(self.authors), self.language, self.tags) + series_text\n\n","sub_path":"pydb/opf.py","file_name":"opf.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"492805124","text":"#%% Imports\nimport pandas as pd\nimport numpy as np\nimport logging\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport os\nrepopath = '/Users/francescofusaro/Documents/Github/aml2020'\nos.chdir(repopath)\n\nimport hashlib\n\nfrom biosppy.signals import ecg\n#from ecgdetectors import Detectors\n#from hrv import HRV\nimport neurokit2 as nk\n\n#%% Populate container for plot signals\ndef populate_PlotData(PD,i,sample_id,class_id,raw_ecg,rpeaks_biosppy,filtered_biosppy,signals_neurokit):\n PD[i][0]=sample_id\n PD[i][1]=class_id\n PD[i][2]=raw_ecg\n PD[i][3]=rpeaks_biosppy\n PD[i][4]=filtered_biosppy\n PD[i][5]=signals_neurokit\n \n return(PD)\n#%%Load Data Set\ndef load_data(repopath):\n X = pd.read_csv(f'{repopath}/project3_ffu/X_train_small.csv')\n y = pd.read_csv(f'{repopath}/project3_ffu/y_train_small.csv')\n #X_test = pd.read_csv(f'{repopath}/project3_ffu/X_test.csv')\n X_test = 0\n logging.info('Dataset imported')\n \n return (X, y, X_test)\n\n#%%Split Classes\ndef split_classes(X,y):\n class0_ls = y.index[y['y'] == 0].tolist() #healthy\n class1_ls = y.index[y['y'] == 1].tolist() #Arrhythmia1\n class2_ls = y.index[y['y'] == 2].tolist() #Arrhythmia2\n class3_ls = y.index[y['y'] == 3].tolist() #Noise\n \n X0 = X.iloc[class0_ls,:]\n df_X0 = pd.DataFrame(data=X0,columns=X.columns)\n \n X1 = X.iloc[class1_ls,:]\n df_X1 = pd.DataFrame(data=X1,columns=X.columns)\n \n X2 = X.iloc[class2_ls,:]\n df_X2 = pd.DataFrame(data=X2,columns=X.columns)\n \n X3 = X.iloc[class3_ls,:]\n df_X3 = pd.DataFrame(data=X3,columns=X.columns)\n \n return(df_X0, df_X1, df_X2, df_X3)\n\n#%% Define more flexible ecg_process function\ndef ecg_process_AML(ecg_signal, sampling_rate=1000, method=\"neurokit\"):\n \"\"\"Process an ECG signal as original neurokit2 function, see:\n https://neurokit2.readthedocs.io/en/latest/_modules/neurokit2/ecg/ecg_process.html#ecg_process\n \n However to increase flexibility, 'method' parameter is specifically set for 
each subfuntion call:\n \n - ecg_clean methods: Can be one of ‘neurokit’ (default), ‘biosppy’, ‘pamtompkins1985’, ‘hamilton2002’, ‘elgendi2010’, ‘engzeemod2012’.\n \n - ecg_peaks methods: Can be one of ‘neurokit’ (default), ‘pamtompkins1985’, ‘hamilton2002’, ‘christov2004’, ‘gamboa2008’, ‘elgendi2010’, ‘engzeemod2012’ or ‘kalidas2017’\n \n - ecg_delineate methods: Indentify PQRST peak Can be one of ‘peak’ (default) for a peak-based method, ‘cwt’ for continuous wavelet transform or ‘dwt’ for discrete wavelet transform.\n see: https://neurokit2.readthedocs.io/en/latest/examples/ecg_delineate.html\n \"\"\"\n \n #clean\n ecg_preprocess_clean_method = 'biosppy' \n ecg_cleaned = nk.ecg.ecg_clean(ecg_signal, sampling_rate=sampling_rate, method=ecg_preprocess_clean_method)\n # R-peaks\n ecg_preprocess_rpeaks_method = 'neurokit'\n instant_peaks, rpeaks, = nk.ecg.ecg_peaks(\n ecg_cleaned=ecg_cleaned, sampling_rate=sampling_rate, method=ecg_preprocess_rpeaks_method, correct_artifacts=True\n )\n\n rate = nk.signal_rate(rpeaks, sampling_rate=sampling_rate, desired_length=len(ecg_cleaned))\n\n quality = nk.ecg.ecg_quality(ecg_cleaned, rpeaks=None, sampling_rate=sampling_rate)\n\n signals = pd.DataFrame({\"ECG_Raw\": ecg_signal, \"ECG_Clean\": ecg_cleaned, \"ECG_Rate\": rate, \"ECG_Quality\": quality})\n\n # Additional info of the ecg signal\n ecg_preprocess_delineate_method = 'dwt'\n delineate_signal, delineate_info = nk.ecg.ecg_delineate(\n ecg_cleaned=ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate, method=ecg_preprocess_delineate_method\n )\n \n if ecg_preprocess_delineate_method != 'peak':\n # 'dwt' and 'cwt' Unlike the peak method, 'dwt' and 'cwt' does not idenfity the Q-peaks and S-peaks.\n delineate_signal_peak, delineate_info_peak = nk.ecg.ecg_delineate(\n ecg_cleaned=ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate, method='peak' \n )\n delineate_signal['ECG_Q_Peaks'] = delineate_signal_peak['ECG_Q_Peaks']\n delineate_signal['ECG_S_Peaks'] = 
delineate_signal_peak['ECG_S_Peaks']\n \n cardiac_phase = nk.ecg.ecg_phase(ecg_cleaned=ecg_cleaned, rpeaks=rpeaks, delineate_info=delineate_info)\n\n signals = pd.concat([signals, instant_peaks, delineate_signal, cardiac_phase], axis=1)\n\n info = rpeaks\n return signals, info\n \n#%% Extracted peaks summary\ndef calc_peak_summary(signals, sampling_rate):\n #peak summary\n summary = []\n sig_qq = signals[signals['ECG_Q_Peaks'] == 1]\n q_count = len(sig_qq)\n sig_rr = signals[signals['ECG_R_Peaks'] == 1]\n r_count = len(sig_rr)\n sig_pp = signals[signals['ECG_P_Peaks'] == 1]\n p_count= len(sig_pp)\n sig_ss = signals[signals['ECG_S_Peaks'] == 1]\n s_count = len(sig_ss)\n sig_tt = signals[signals['ECG_T_Peaks'] == 1]\n t_count = len(sig_tt)\n \n #peak counts\n p_rel = p_count/r_count\n q_rel = q_count/r_count\n s_rel = s_count/r_count\n t_rel = t_count/r_count\n summary.append(p_rel)\n summary.append(q_rel)\n summary.append(r_count)\n summary.append(s_rel)\n summary.append(t_rel)\n \n #peak p amplitude\n p_mean = sig_pp['ECG_Clean'].mean()\n summary.append(p_mean)\n p_std = sig_pp['ECG_Clean'].std()\n summary.append(p_std)\n \n #peak s amplitude\n s_mean = sig_ss['ECG_Clean'].mean()\n summary.append(s_mean)\n s_std = sig_ss['ECG_Clean'].std()\n summary.append(s_std)\n \n #QRS duration\n sig_r_onset = signals[signals['ECG_R_Onsets'] == 1]\n sig_r_offset = signals[signals['ECG_R_Offsets'] == 1]\n if (len(sig_r_onset) == len(sig_r_offset)):\n d_qrs_N = sig_r_offset.index.to_numpy().ravel() - sig_r_onset.index.to_numpy().ravel() #number of samples between R Onset and Offset\n d_qrs_t = (d_qrs_N - 1) / sampling_rate\n d_qrs_t_mean = d_qrs_t.mean()\n d_qrs_t_std = d_qrs_t.std()\n else:\n #TODO: in case of unenven R Onset and Offset detection develop more sofisticated algo to check which peaks can be retained?\n d_qrs_t_mean = np.nan\n d_qrs_t_std = np.nan\n \n \n summary.append(d_qrs_t_mean)\n summary.append(d_qrs_t_std)\n \n return summary\n\n#%% extract features from 
ECGs\ndef extract_features(df, Fs, feature_list, remove_outlier, biosppy_enabled, ecg_quality_check, ecg_quality_threshold, class_id, verbose):\n \n if remove_outlier:\n logging.info('Removing ecg outliers with pyheart...')\n \n if biosppy_enabled:\n logging.info('Pre-filtering ECG with biosspy')\n \n # Define F array to aggregate extracted sample features\n F=np.zeros([df.shape[0],len(feature_list)])\n \n # Define PD as a list array to aggregate extracted sample infos (for later plotting)\n # PD columns: [0:sample id | 1: class id | 2: raw signal| 3: r_peaks_biosspy | 4: filtered biosppy | 5: signals neurokit ]\n # PD rows: number of ecg signals\n plotData = []\n for n_row in range(df.shape[0]):\n column = []\n for n_col in range(6):\n column.append(0)\n plotData.append(column)\n \n # for all the rows in the df\n for i in range(len(df)):\n sig_i = df.iloc[i,1:] #signal i wo sample id\n sig_i = sig_i.replace(to_replace='NaN',value=np.nan)\n sig_i_np = (sig_i.to_numpy()).ravel()\n sig_i_np = sig_i_np[~np.isnan(sig_i_np)] #this is our ecg raw signal\n \n # remove outliers using pyheart?\n if remove_outlier:\n dummy=1 #TODO: remove outliers using pyheart?\n \n \n # filter ecg signal with biosspy first\n if biosppy_enabled:\n try:\n out = ecg.ecg(signal=sig_i_np, sampling_rate=Fs, show=False)\n \n # ts (array) – Signal time axis reference (seconds).\n # filtered (array) – Filtered ECG signal.\n # rpeaks (array) – R-peak location indices.\n # templates_ts (array) – Templates time axis reference (seconds).\n # templates (array) – Extracted heartbeat templates.\n # heart_rate_ts (array) – Heart rate time axis reference (seconds).\n # heart_rate (array) – Instantaneous heart rate (bpm).\n\n (ts, filtered_biosppy, rpeaks_biosppy, templates_ts, \n templates, heart_rate_ts, heart_rate) = out\n \n no_rpeaks_biosppy = len(rpeaks_biosppy)\n \n except Exception:\n logging.info(f'biosppy crashed for sample {i} in class {class_id}')\n rpeaks_biosppy = np.nan\n no_rpeaks_biosppy = 
np.nan\n filtered_biosppy = np.nan\n else:\n rpeaks_biosppy = np.nan\n no_rpeaks_biosppy = np.nan\n filtered_biosppy = np.nan\n\n \n # process ecg sample with with neurokit\n # signals, info = nk.ecg_process(sig_i_np, sampling_rate=Fs)\n # use customized function\n try: \n signals, info = ecg_process_AML(sig_i_np, sampling_rate=Fs)\n \n if ecg_quality_check:\n #TODO: keep only the signals with ecq quality above threshold?\n dummy=1\n \n # calculate ecg signal HR indicators\n df_analyze = nk.ecg_analyze(signals, sampling_rate=Fs, method='auto')\n \n # filter signals for peak counts, amplitudes, and QRS event duration\n peak_summary_neurokit = calc_peak_summary(signals=signals, sampling_rate=Fs)\n \n # calculate the mean and standard devation of the signal quality\n ecg_q_mean = signals['ECG_Quality'].mean() \n ecg_q_std = signals['ECG_Quality'].std()\n \n # consolidate the features for sample i\n feat_i = [df.iloc[i,0]] # init a list with sample id\n feat_i.append(ecg_q_mean)\n feat_i.append(ecg_q_std,)\n feat_i.append(df_analyze.iloc[0,0]) #ECG_Rate_Mean\n feat_i.append(df_analyze.iloc[0,1]) #HRV_RMSSD\n feat_i.append(len(rpeaks_biosppy)) #no. of detected r-peaks in biosspy\n for elem in peak_summary_neurokit:\n feat_i.append(elem)\n except Exception:\n logging.info(f'neurokit2 crashed for sample {i} in class {class_id}')\n n = len(feature_list)\n feat_i = [np.nan]*n\n feat_i[0] = df.iloc[i,0] # sample id\n feat_i[5] = no_rpeaks_biosppy #maybe biosppy worked\n \n F[i,:] = feat_i\n plotData = populate_PlotData(plotData,i,df.iloc[i,0],class_id,sig_i_np,rpeaks_biosppy,filtered_biosppy,signals)\n if verbose:\n sample_left = df.shape[0]-i\n print(f'Preprocessed ECG sample {i}({df.iloc[i,0]}) in class {class_id}... 
{sample_left} samples to go!')\n #TODO: in a suitable container collect the sample id and the signals dataframe (output of neurokit), which\n #which contains all the info for the plots\n \n feat_df = pd.DataFrame(data=F,columns=feature_list)\n \n return(feat_df, plotData) \n \n#%% Main\n\nrepopath = '/Users/francescofusaro/Documents/Github/aml2020'\nos.chdir(repopath)\n\n#%% Load data from repo (keep sample id for later use)\nX, y, X_test = load_data(repopath)\n\n#%% Split the original dataframe according to class\nX0, X1, X2, X3 = split_classes(X, y)\n\n#%% Define dataframe template in which will be filled with the extracted features\nfeature_list = ['Sample_Id', \n 'ECQ_Quality_Mean', 'ECQ_Quality_STD', \n 'ECG_Rate_Mean', 'ECG_Rate_STD',\n 'R_P_biosppy', 'P_P/R_P', 'Q_P/R_P', 'R_P_neurokit' ,'S_P/R_P', 'T_P/R_P', #relative number of peaks TODO\n 'P_Amp_Mean', 'P_Amp_STD', 'S_Amp_Mean', 'S_Amp_STD',\n 'QRS_t_Mean', 'QRS_t_STD']\n\n\n#%% Feature extraction class 0\nX0_features, X0_plotData = extract_features(df=X0,\n Fs = 300,\n feature_list = feature_list, \n remove_outlier=True, \n biosppy_enabled=True, \n ecg_quality_check=True, \n ecg_quality_threshold=0.8, \n class_id=0,\n verbose=True\n )\n\nX0_features.head()\n#%% Feature extraction class 1\nX1_features, X1_plotData = extract_features(df=X1,\n Fs = 300,\n feature_list = feature_list, \n remove_outlier=False, \n biosppy_enabled=True, \n ecg_quality_check=False, \n ecg_quality_threshold=0.8, \n class_id=1,\n verbose=True\n )\nX1_features.head()\n#%% Feature extraction class 2\nX2_features, X2_plotData = extract_features(df=X2,\n Fs = 300,\n feature_list = feature_list, \n remove_outlier=True, \n biosppy_enabled=True, \n ecg_quality_check=True, \n ecg_quality_threshold=0.8, \n class_id=2,\n verbose=True\n )\nX2_features.head()\n#%% Write pickle or similar\nsave_pickle = True\nif save_pickle:\n df_hash_f = lambda obj: hashlib.sha1(pd.util.hash_pandas_object(obj).values).hexdigest()\n X2_features_hash = 
df_hash_f(X2_features)","sub_path":"project3_ffu/feat_extr_prj3.py","file_name":"feat_extr_prj3.py","file_ext":"py","file_size_in_byte":13764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"450937721","text":"import numpy as np\nfrom rvv_io import *\nfrom rvv_functions import *\nfrom rvv_solution import *\nfrom rvv_pushers import *\nfrom rvv_fields import *\nfrom rel_col18 import implicit_coll\nfrom rel_sdc2 import *\nfrom gauss_legendre import CollGaussLegendre\nfrom gauss_lobatto import CollGaussLobatto\n\nsims = [10,20,40,80,160,320]\n# sims = [100]\ntend = 1\n\nM = 3\nK = 2\n\nc = 29979\nq = 1\n\n# gamma_max = 1.0000000000005\ngamma_max = 5.\nbeta_max = np.sqrt(1-1./gamma_max**2.)\nuy_max = beta_max*c\nnew = True\n\nfor Nt in sims:\n dt = tend/Nt\n\n nq = 1\n\n pos = np.zeros((nq,3),dtype=np.float)\n vel = np.zeros((nq,3),dtype=np.float)\n\n vel[:,1] = np.linspace(uy_max/8,uy_max,nq)\n vel[:,2] = np.sqrt(1-1./gamma_max**2.)\n\n pos = np.array([[10.,0.,0.]])\n vel = np.array([[100.,0.,100.]])\n\n # gamma = gu(vel,c=c)\n # lfreq = -q*Bfield/(1*c*gamma)\n # larmor = vel[:,1]/gamma/lfreq\n # #larmor = 1*vel[:,1]/(-q*B)\n # pos[:,0] = larmor\n\n t = 0\n\n x_array = [pos]\n x2_array = [pos]\n v_array = [vel]\n t_array = [t]\n\n col = coll(CollGaussLobatto,dt,nq,K=K,M=M,c=c,q=q,predictor=True)\n rx_array = [np.linalg.norm(col.Rx,axis=1)]\n rv_array = [np.linalg.norm(col.Rv,axis=1)]\n\n # Collocation solution stuff\n posc = np.copy(pos)\n velc = np.copy(vel)\n colc = coll(CollGaussLobatto,dt,nq,M=5,K=1,c=c,q=q)\n\n for ti in range(1,Nt+1):\n t = ti*dt\n\n pos, vel, col = boris_SDC(pos,vel,col)\n # posc, velc, colc = implicit_coll(posc,velc,colc)\n rx_array.append(np.linalg.norm(col.Rx,axis=1))\n rv_array.append(np.linalg.norm(col.Rv,axis=1))\n x2_array.append(posc)\n x_array.append(pos)\n v_array.append(vel)\n t_array.append(t)\n\n # colc.calc_residual_2018(1)\n # col.calc_residual_2018(4)\n # errorx = np.abs(col.x[2:,0,:]-np.around(colc.x[2:,0,:],14))/np.abs(np.around(colc.x[2:,0,:],14))\n # errorf = np.abs(col.F[2:,0,:]-np.around(colc.F[2:,0,:],14))/np.abs(np.around(colc.F[2:,0,:],14))\n # erroru = 
np.abs(col.u[2:,0,:]-np.around(colc.u[2:,0,:],14))/np.abs(np.around(colc.u[2:,0,:],14))\n # print(\"Diff in x: {0}\".format(errorx))\n # print(\"Diff in F: {0}\".format(errorf))\n # print(\"Diff in u: {0}\".format(erroru))\n # print(\"SDC solution: {0}\".format(col.Rv))\n # print(\"Collocation solution: {0}\".format(colc.Rv))\n rx_array = np.array(rx_array)\n rv_array = np.array(rv_array)\n x_array = np.array(x_array)\n x2_array = np.array(x2_array)\n v_array = np.array(v_array)\n t_array = np.array(t_array)\n\n if col.predictor == True:\n rhs = (M-1)*(K+1)*Nt\n else:\n rhs = (M-1)*K*Nt\n\n wp_dump(t_array,x_array,v_array,dt,\"sdc_M{0}K{1}_wp_vvrel.h5\".format(M,K),rhs=rhs,new=new)\n new = False\n\nplot_xres(t_array,rx_array,\"sdc_M{0}K{1}_\".format(M,K)+str(Nt))\nplot_vres(t_array,rv_array,\"sdc_M{0}K{1}_\".format(M,K)+str(Nt))\nplot_isotraj(x_array,\"sdc_\"+str(Nt),label=\"sim\")\n# plot_isotraj(x2_array,\"col2_\"+str(Nt),label=\"sim\")\nplot_vel(t_array,v_array,\"sdc_\"+str(Nt),label=\"sim\")\n","sub_path":"rvv_sdc.py","file_name":"rvv_sdc.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"147878078","text":"import torch\n# import torch should be first. Unclear issue, mentioned here: https://github.com/pytorch/pytorch/issues/2083\nimport torch.nn as nn\n\nfrom collections import OrderedDict\n\nfrom models import TernausNet, unet, checkpointed_unet, inception\n\n\ndef maxpool_level(model, num_bands, size):\n \"\"\"Calculate and return the number of maxpool inside the model definition.\n This function is useful during inference in order to calculate the number of pixel required as context.\n \"\"\"\n def register_hook(module):\n def hook(module, input, output):\n class_name = str(module.__class__).split('.')[-1].split(\"'\")[0]\n module_idx = len(summary)\n\n m_key = '%s-%i' % (class_name, module_idx + 1)\n summary[m_key] = OrderedDict()\n\n if not isinstance(module, nn.Sequential) and not isinstance(module, nn.ModuleList) and not (module == model):\n hooks.append(module.register_forward_hook(hook))\n\n input_size = (num_bands, size, size)\n x = torch.rand(1, *input_size).type(torch.FloatTensor)\n\n summary = OrderedDict()\n hooks = []\n model.apply(register_hook)\n model(x)\n # remove these hooks\n for h in hooks:\n h.remove()\n\n maxpool_count = 0\n for layer in summary:\n if layer.startswith(\"MaxPool2d\"):\n maxpool_count += 1\n return {'MaxPoolCount': maxpool_count}\n\n\ndef net(net_params, rtn_level=False):\n \"\"\"Define the neural net\"\"\"\n model_name = net_params['global']['model_name'].lower()\n state_dict_path = ''\n if model_name == 'unetsmall':\n model = unet.UNetSmall(net_params['global']['num_classes'],\n net_params['global']['number_of_bands'],\n net_params['models']['unetsmall']['dropout'],\n net_params['models']['unetsmall']['probability'])\n if net_params['models']['unetsmall']['pretrained']:\n state_dict_path = net_params['models']['unetsmall']['pretrained']\n elif model_name == 'unet':\n model = unet.UNet(net_params['global']['num_classes'],\n net_params['global']['number_of_bands'],\n 
net_params['models']['unet']['dropout'],\n net_params['models']['unet']['probability'])\n if net_params['models']['unet']['pretrained']:\n state_dict_path = net_params['models']['unet']['pretrained']\n elif model_name == 'ternausnet':\n model = TernausNet.ternausnet(net_params['global']['num_classes'],\n net_params['models']['ternausnet']['pretrained'])\n elif model_name == 'checkpointed_unet':\n model = checkpointed_unet.UNetSmall(net_params['global']['num_classes'],\n net_params['global']['number_of_bands'],\n net_params['models']['unetsmall']['dropout'],\n net_params['models']['unetsmall']['probability'])\n if net_params['models']['unetsmall']['pretrained']:\n state_dict_path = net_params['models']['unetsmall']['pretrained']\n elif model_name == 'inception':\n model = inception.Inception3(net_params['global']['num_classes'],\n net_params['global']['number_of_bands'])\n if net_params['models']['inception']['pretrained']:\n state_dict_path = net_params['models']['inception']['pretrained']\n else:\n raise ValueError('The model name in the config.yaml is not defined.')\n\n if rtn_level:\n lvl = maxpool_level(model, net_params['global']['number_of_bands'], 256)\n return model, state_dict_path, lvl['MaxPoolCount']\n else:\n return model, state_dict_path, model_name\n","sub_path":"models/model_choice.py","file_name":"model_choice.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"512255318","text":"\"\"\"Sorted, reveresed and lamba function\"\"\"\r\n\r\nlst=['james','smith','blake','king','meena']\r\n\r\nfor i in sorted(lst): #Sorted\r\n print(i,end=\" \")\r\n\r\nprint()\r\n\r\nfor i in reversed(lst): #reversed\r\n print(i,end=\" \")\r\n\r\n\r\ndef mys(lst):\r\n return lst[-1]\r\n\r\nlst.sort(key=mys)\r\n\r\n\r\nlst.sort(Key=lambda name:name[-1]) #to sort the list based on last letter\r\n #using lamba function\r\n","sub_path":"26-11 sorted.py","file_name":"26-11 sorted.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"227051920","text":"from collections import OrderedDict\nfrom ming import create_datastore\nfrom stores.mongo_store import MongoStore\nfrom unittest import TestCase\n\nurl = 'mongodb://localhost:27017/metahosting_tests'\n\n\nclass MongoStoreTest(TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n conn = create_datastore(url).conn\n conn.drop_database('metahosting_tests')\n\n def get_store(self):\n config = dict()\n config['url'] = url\n config['database'] = 'metahosting_tests'\n config['collection'] = 'tests'\n return MongoStore(config=config)\n\n def test_update(self):\n store = self.get_store()\n self.assertIsNone(store.get('name'))\n store.update('name', 'value')\n self.assertIsNotNone(store.get('name'))\n store.update('name', 'new_value')\n self.assertEqual(store.get('name'), 'new_value')\n\n def test_get(self):\n store = self.get_store()\n self.assertIsNone(store.get('foo'))\n store.update('foo', 'bar')\n self.assertIsNotNone(store.get('foo'))\n self.assertEqual('bar', store.get('foo'))\n\n def test_get_all(self):\n store = self.get_store()\n ret = store.get_all()\n self.assertDictEqual({}, ret)\n a = dict()\n a['foo'] = 'bar'\n a['foo2'] = 'bar2'\n a[1] = 'some'\n a['foo3'] = 21211\n for key, value in a.iteritems():\n store.update(key, value)\n\n self.assertDictEqual(a, store.get_all())\n\n def test_get_all_sorted(self):\n store = self.get_store()\n ret = store.get_all(sort_key='anything')\n self.assertDictEqual({}, ret)\n\n a = OrderedDict()\n a['foo'] = 'bar'\n a['foo2'] = 'bar2'\n a[1] = 'some'\n a['foo3'] = 21211\n for key, value in a.iteritems():\n store.update(key, value)\n\n self.assertDictEqual(a, store.get_all(sort_key='anything'))\n\n def test_update_with_dict(self):\n store = self.get_store()\n simple_dict = dict()\n simple_dict['a'] = {'foo': 'bar'}\n store.update('foo', simple_dict)\n simple_dict['a'] = {'ooo': 'barr'}\n ret = store.get('foo')\n self.assertTrue('a' in ret)\n self.assertFalse('b' in ret)\n ret['c'] = {'foo': 
'ba'}\n ret2 = store.get('foo')\n self.assertFalse('c' in ret2)\n\n def test_constrained(self):\n store = self.get_store()\n for i in range(0, 10):\n store.update('key%s' % i, {'foo': 'bar',\n 'numerical': i,\n 'textual': '%sval' % i})\n\n key = 'value.numerical'\n result = store.get_constrained({key: {'$lt': 0}})\n self.assertEqual(len(result), 0)\n\n result = store.get_constrained({key: {'$lt': 5}})\n self.assertEqual(len(result), 5)\n\n result = store.get_constrained({key: {'$lt': 100}})\n self.assertEqual(len(result), 10)\n\n # mongo 2.4 (as provided by travis) does not have $eq\n key = 'value.textual'\n result = store.get_constrained({key: '8val'})\n self.assertEqual(len(result), 1)\n\n key = 'value.foo'\n result = store.get_constrained({key: 'bar'})\n self.assertEqual(len(result), 10)\n\n result = store.get_constrained({})\n self.assertEqual(len(result), 10)\n\n def get_service(self, name, description, availability):\n service = dict()\n service['name'] = name\n service['description'] = description\n service['available'] = availability\n return service\n\n def test_alternative_type_retrieval(self):\n store = self.get_store()\n service = self.get_service('neo4j', 'database', True)\n store.update(service['name'], service)\n service = self.get_service('eXist', 'XML', True)\n store.update(service['name'], service)\n service = self.get_service('voyant', 'GUI', False)\n store.update(service['name'], service)\n\n result = store.get_all()\n self.assertEqual(len(result), 3)\n\n result = store.get_constrained(constrain={'value.available': True})\n self.assertEqual(len(result), 2)\n\n result = store.get_constrained(constrain={'value.available': False})\n self.assertEqual(len(result), 1)\n\n service = self.get_service('neo5j', 'New database', True)\n service.pop('available')\n store.update(service['name'], service)\n\n result = store.get_constrained(constrain={'value.available': True})\n self.assertEqual(len(result), 2)\n\n result = 
store.get_constrained(constrain={'value.available': False})\n self.assertEqual(len(result), 1)\n\n result = store.get_constrained(constrain={})\n self.assertEqual(len(result), 4)\n","sub_path":"tests/test_mongo_store.py","file_name":"test_mongo_store.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"157021048","text":"#!/usr/bin/python\n# -*-coding:utf-8 -*\n\nimport curses\nimport curses.wrapper\nimport curses.panel\nimport curses.textpad\nimport sys\nimport pickle\nimport os\nimport datetime\nimport locale\n\nclass Liste():\n\t\n\tdef __init__(self, index, titre, descr, prior=\"L\"):\n\t\tself.creation = (datetime.date.today())\n\t\tself.ref = index\n\t\tself.titre = titre\n\t\tself.descr = descr\n\t\tself.prior = prior\n\t\t\n\tdef __repr__(self):\n\t\treturn \"({0}) - {3}\\n\t{1}: {2}\\n\".format(\\\n\t\tself.ref, self.titre, self.descr, self.creation)\n\t\n\tdef __getstate__(self):\n\t\tdict_attr = dict(self.__dict__)\n\t\tdict_attr[\"attribut_temporaire\"] = 0\n\t\treturn dict_attr\n\t\n\tdef __setstate__(self, dict_attr):\n\t\tdict_attr[\"attribut_temporaire\"] = 0\n\t\tself.__dict__ = dict_attr\n\ndef getMaxYX(self, char):\n\tmaxYX = self.getmaxyx()\n\tif char == 'X':\n\t\treturn maxYX[1]\n\tif char == 'Y':\n\t\treturn maxYX[0]\n\t\t\n\t\ndef about_box(pan_about, about):\n\tpan_about.show()\n\tabout.getch()\n\tpan_about.hide()\n\t\ndef Index():\n\tif os.path.exists(\"index\"):\n\t\tindex = open(\"index\", \"r\")\n\t\tnbItems = int(index.read())\n\t\tindex.close()\n\t\treturn nbItems\n\telse:\n\t\tindex = open(\"index\", \"w\")\n\t\tindex.write(str(0))\n\t\tindex.close()\n\t\treturn Index()\n\t\t\n\ndef UpdateIndex(nbItems, test):\n\tindex = open(\"index\", \"w\")\n\tif test == \"up\":\n\t\tindex.write(str(nbItems + 1))\n\telse:\n\t\tindex.write(str(nbItems - 1))\n\tindex.close()\n\t\ndef RecupDonneesBis(nbItems):\n\tmesNotes = []\n\tif os.path.exists(\"todo_liste\"):\n\t\twith open('todo_liste', 'rb') as todo:\n\t\t\tmy_pickler = pickle.Unpickler(todo)\n\t\t\ti = 1\n\t\t\twhile i <= nbItems:\n\t\t\t\tmesNotes.append(my_pickler.load())\n\t\t\t\ti += 1\n\t\t\treturn mesNotes\n\telse:\n\t\treturn mesNotes\n\t\t\ndef EnregDonnees(nbItems, mesNotes):\n\twith open('todo_liste', 'wb') as todo:\n\t\tmy_pickler = pickle.Pickler(todo)\n\t\ti = 
1\n\t\twhile i <= nbItems:\n\t\t\tmy_pickler.dump(mesNotes[i-1])\n\t\t\ti += 1\n\ndef AffichageNote_bis():\n\tliste_note = []\n\tliste_note = RecupDonneesBis(Index())\n\treturn liste_note\n\t\t\ndef NouvelleNoteBis(titre, descr):\n\t\n\ttitre = titre[2:len(titre)-1]\n\tdescr = descr[2:len(descr)-1]\n\t\n\tmesNotes = RecupDonneesBis(Index())\n\tUpdateIndex(Index(), \"up\")\n\tmesNotes.append(Liste(Index(), titre.capitalize(), descr.capitalize()))\n\tEnregDonnees(Index(), mesNotes)\n\t\ndef SupressionNoteBis(id_note):\n\tif Index() != 0:\n\t\tmesNotes = RecupDonneesBis(Index())\n\t\tdel mesNotes[id_note - 1]\n\t\tUpdateIndex(Index(), \"down\")\n\t\ti = 1\n\t\twhile i <= Index():\n\t\t\tmesNotes[i-1].ref = i\n\t\t\ti += 1\n\t\tEnregDonnees(Index(), mesNotes)\n\n\nclass TODO(object):\n\t\t\n\tmenu = [\"n:\",\"New\",\"d:\",\"Delete\",\"t:\",\"Toogle\",\"q:\",\"Quit\",\"a:\",\"About\"]\n\titem = 1\n\t\n\tdef __init__(self, scr):\n\t\tself.scr = scr\n\t\tself.init_curses_mode()\n\t\n\tdef init_curses_mode(self):\n\t\tself.init_curses()\n\t\tself.draw_menu_bottom()\n\t\tself.draw_menu_top()\n\t\tself.draw_notes()\n\t\tself.show_notes()\n\t\tself.create_about()\n\t\tself.handle_key_stroke()\n\n\tdef init_curses(self):\n\t\tlocale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')\n\t\tcurses.noecho()\n\t\tcurses.cbreak()\n\t\tcurses.curs_set(0)\n\t\tself.scr.keypad(1)\n\t\tself.scr.erase()\n\t\tcurses.use_default_colors()\n\t\tcurses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)\n\t\tcurses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_GREEN)\n\t\tcurses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_RED)\n\t\tcurses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_BLUE)\n\t\tcurses.init_pair(5, curses.COLOR_BLACK, curses.COLOR_YELLOW)\n\t\tcurses.init_pair(6, curses.COLOR_BLUE, -1)\n\t\n\tdef draw_menu_bottom(self):\n\t\tself.maxyx = self.scr.getmaxyx()\n\t\tself.bot = self.scr.subwin(3, self.maxyx[1], self.maxyx[0] - 3, 0)\n\t\t\n\t\tj=2\n\t\tfor i, elt in 
enumerate(self.menu):\n\t\t\tif i%2 == 0:\n\t\t\t\tself.bot.addstr(1, j, elt, curses.A_BOLD)\n\t\t\t\tj += len(elt)\n\t\t\telse:\n\t\t\t\tself.bot.addstr(1, j, elt, curses.color_pair(1))\n\t\t\t\tj += len(elt) + 2\n\t\t\t\t\n\t\tself.bot.addstr(1, self.maxyx[1] - 5, \"v1.0\")\n\t\t#self.bot.border(0)\n\t\tself.bot.refresh()\n\t\t\n\tdef draw_menu_top(self):\n\t\tself.top = self.scr.subwin(self.maxyx[0] - 3, self.maxyx[1], 0, 0)\n\t\t\n\t\tself.pad = self.top.subpad(3, self.maxyx[1] - 2, 0, 1)\n\t\t\n\t\tfor y in range(0,3):\n\t\t\tfor x in range(0, self.maxyx[1] - 2):\n\t\t\t\ttry: self.pad.addstr(y, x, \" \", curses.color_pair(1))\n\t\t\t\texcept curses.error: pass\n\t\t\t\t\n\t\tself.top.attron(curses.A_BOLD)\t\t\n\t\tself.top.addstr(1, 2, \"TODO - Notes Manager\")\n\t\tself.top.attroff(curses.A_BOLD)\t\n\t\tself.pad.refresh()\n\t\tself.top.refresh()\n\t\t\n\tdef draw_notes(self):\n\t\tself.notes = self.top.subwin(self.maxyx[0] - 6, self.maxyx[1], 3, 0)\n\n\t\tif Index() != 0:\n\t\t\tself.notes.addstr(1, self.maxyx[1] - 15, \"{0} Notes saved\".format(Index()), curses.A_BLINK)\n\t\telse:\n\t\t\tself.notes.addstr(1, self.maxyx[1] - 15, \"0 Note saved\")\n\t\t#self.notes.border(0)\n\t\tself.notes.refresh()\n\t\t\t\n\tdef show_notes(self):\n\t\tmesNotes = []\n\t\tmesNotes = AffichageNote_bis()\n\t\t\n\t\tmaxY = getMaxYX(self.notes, 'Y')\n\t\t\n\t\tj=5\n\t\theight = maxY - 3\n\t\t\n\t\tfor i, elt in enumerate(mesNotes):\n\t\t\t\n\t\t\tif i + 1 >=self.item:\n\t\t\t\tif height >= 4:\n\t\t\t\t\tcadre = \"notes_\" + str(i)\n\t\t\t\t\tself.cadre = self.notes.subwin(4, self.maxyx[1] - 2, j, 1)\n\t\t\t\t\t\n\t\t\t\t\tif mesNotes[i].ref == self.item:\n\t\t\t\t\t\tself.cadre.addstr(1, 4, \"N°{0} - {1}\".format(mesNotes[i].ref, mesNotes[i].titre), curses.color_pair(5))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.cadre.addstr(1, 4, \"N°{0} - {1}\".format(mesNotes[i].ref, mesNotes[i].titre), curses.color_pair(1))\n\t\t\t\t\t\t\n\t\t\t\t\tif mesNotes[i].prior == 
\"L\":\n\t\t\t\t\t\tself.cadre.addstr(1, 2, \"{0}\".format(mesNotes[i].prior), curses.color_pair(2))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.cadre.addstr(1, 2, \"{0}\".format(mesNotes[i].prior), curses.color_pair(3))\n\t\t\t\t\t\t\n\t\t\t\t\tk = 1\n\t\t\t\t\ta = 0\n\t\t\t\t\tb = getMaxYX(self.scr, 'X') - 33\n\t\t\t\t\twhile k<=2:\n\t\t\t\t\t\tchaine = mesNotes[i].descr\n\t\t\t\t\t\tchaine = chaine[a:b]\n\t\t\t\t\t\tself.cadre.addstr(k, 30, \"{0}\".format(chaine))\n\t\t\t\t\t\ta = b\n\t\t\t\t\t\tb = 2 * b\n\t\t\t\t\t\tk += 1\n\t\t\t\t\t\n\t\t\t\t\tself.cadre.addstr(2, 4, \"{0}\".format(mesNotes[i].creation))\n\t\t\t\t\t\n\t\t\t\t\tself.cadre.border(1,1,0,0,0,0,0,0)\n\t\t\t\t\tself.cadre.refresh()\n\t\t\t\t\tj += 4\n\t\t\t\t\tk += 2\n\t\t\t\t\theight -= 4\n\t\tself.notes.vline(3,29,'|',self.maxyx[0] - 8)\n\t\t\t\t\n\tdef navigate_up(self):\n\t\tif(self.item >= 2):\n\t\t\tself.item -= 1\n\t\t\tself.affichage_refresh()\n\t\t\t\n\tdef navigate_down(self):\n\t\tif(self.item < Index()):\n\t\t\tself.item += 1\n\t\t\tself.affichage_refresh()\n\t\t\t\n\tdef suppression(self):\n\t\tSupressionNoteBis(int(self.item))\n\t\tif self.item != 1:\n\t\t\tself.item -= 1\n\t\tself.affichage_refresh()\n\t\t\n\tdef create_about(self):\n\t\theight = 9\n\t\twidth = 47\n\t\t\n\t\tself.about = curses.newwin(height, width, int(1 / 2 * getMaxYX(self.scr, 'Y') - height / 2), int(1 / 2 * getMaxYX(self.scr, 'X') - width / 2))\n\t\tself.about.border(0)\n\t\tself.about.addstr(1, width - 11 , \"About TODO\", curses.color_pair(6))\n\t\tself.about.addstr(2, 1, \" ***** * ** *\")\n\t\tself.about.addstr(3, 1, \" * * * * * * *\")\n\t\tself.about.addstr(4, 1, \" * * * * * * * Brought to you by:\")\n\t\tself.about.addstr(5, 1, \"* * *** * Vincent Cottineau\")\n\t\tfor x in range(1, width - 1):\n\t\t\tself.about.addch(7, x, \"-\")\n\t\tself.about.addstr(8, 15, \"<>\")\n\t\tself.pan_about = curses.panel.new_panel(self.about)\n\t\t\n\tdef 
affichage_refresh(self):\n\t\tself.scr.erase()\n\t\tself.draw_menu_bottom()\n\t\tself.draw_menu_top()\n\t\tself.draw_notes()\n\t\tself.show_notes()\n\t\tself.create_about()\n\t\tself.scr.refresh()\n\t\tself.bot.refresh()\n\t\tself.top.refresh()\n\t\tself.notes.refresh()\n\t\treturn True\n\t\t\n\tdef toggle(self):\n\t\tmesNotes = []\n\t\tmesNotes = AffichageNote_bis()\n\t\tif(mesNotes[int(self.item) - 1].prior) == 'L':\n\t\t\tmesNotes[int(self.item) - 1].prior = 'H'\n\t\telse:\n\t\t\tmesNotes[int(self.item) - 1].prior = 'L'\n\t\tEnregDonnees(Index(), mesNotes)\n\t\tself.affichage_refresh()\n\t\t\n\tdef nouvelle_entree(self):\n\t\tself.scr.erase()\n\t\tself.draw_menu_top()\n\t\tself.draw_notes()\n\t\tself.bot.refresh()\n\t\t\n\t\tself.cadre_new = self.notes.subwin(4, getMaxYX(self.scr, 'X') - 2, 5, 1)\n\t\t#self.cadre_new.border(1,1,0,0,0,0,0,0)\n\t\t\n\t\tself.cadre_new.addstr(1, 4, \"N°1 -\", curses.color_pair(1))\n\t\tself.cadre_new.addstr(1, 2, \"L\", curses.color_pair(2))\n\t\tself.cadre_new.addstr(2, 4, \"{0}\".format(datetime.date.today()))\n\t\t\t\t\n\t\tcurses.echo()#;curses.nocbreak()\n\t\tcurses.textpad.rectangle(self.cadre_new, 0, 9, 2, 25)\n\t\tinp_1 = self.cadre_new.getstr(1,10,15)\n\t\t\n\t\tcurses.textpad.rectangle(self.cadre_new, 0, 29, 2, getMaxYX(self.scr, 'X') - 4)\n\t\tinp_2 = self.cadre_new.getstr(1, 30, getMaxYX(self.scr, 'X') - 34)\n\t\t\n\t\tNouvelleNoteBis(str(inp_1), str(inp_2))\n\t\t\n\t\tself.cadre_new.erase()\n\t\tcurses.noecho()\n\t\tself.affichage_refresh()\n\t\t\n\t\t\t\t\n\tdef handle_key_stroke(self):\n\t\twhile True:\n\t\t\tch = self.scr.getch()\n\t\t\tif ch == ord('q'):\n\t\t\t\tcurses.endwin()\n\t\t\t\tbreak\n\t\t\telif ch == ord('d'):\n\t\t\t\tself.suppression()\n\t\t\telif ch == ord('a'):\n\t\t\t\tabout_box(self.pan_about, self.about)\n\t\t\telif ch == ord('n'):\n\t\t\t\tself.nouvelle_entree()\n\t\t\telif ch == ord('t'):\n\t\t\t\tself.toggle()\n\t\t\telif ch == curses.KEY_RESIZE:\n\t\t\t\tself.affichage_refresh()\n\t\t\telif ch 
== curses.KEY_UP:\n\t\t\t\tself.navigate_up()\n\t\t\telif ch == curses.KEY_DOWN:\n\t\t\t\tself.navigate_down()\n\t\t\nif __name__ == '__main__':\n\t\t\t\n\tdirectory = os.environ.get(\"HOME\") + \"/.todo\"\n\t\n\tif os.path.isdir(directory) == True:\n\t\tsys.stdout.write(directory)\n\t\tos.chdir(directory)\n\telse:\n\t\tos.mkdir(directory)\n\t\tos.chdir(directory)\n\tcurses.wrapper(TODO)\n\n","sub_path":"2do-curses.py","file_name":"2do-curses.py","file_ext":"py","file_size_in_byte":9062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"283174892","text":"from time import clock, time, perf_counter\nfrom functools import reduce\n\n\ndef secondsToStr(t):\n return \"%d:%02d:%02d.%03d\" % \\\n reduce(lambda ll, b: divmod(ll[0], b) + ll[1:],\n [(t * 1000,), 1000, 60, 60])\n\n\ndef now():\n return secondsToStr(perf_counter())\n\n\nclass Timing:\n line = \"=\" * 40\n\n def __init__(self, name):\n self.name = name\n self.log(\"Starting Test: \"+name)\n self.start = perf_counter()\n\n def log(self, s, elapsed=None):\n print(self.line)\n print(secondsToStr(perf_counter()), '-', s)\n if elapsed:\n print(\"Elapsed time:\", elapsed)\n print(self.line)\n print\n\n def end_log(self):\n end = perf_counter()\n elapsed = end - self.start\n self.log(\"End Program\", secondsToStr(elapsed))\n\n#timer = Timing()\n#timer.start = clock()\n#atexit.register(timer.end_log)\n#timer.log(\"Start Program\")\n","sub_path":"CS420 Project/CaffeVideoDetection/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"508824890","text":"\nfrom Parser import Parser\nfrom Colorizer import Model\nfrom skimage.io import imsave\nimport numpy as np\n\nparser = Parser( 64 )\nmodel = Model()\n\nX = parser.prepare_images_from_dir( 'train_images/' , 'grayscale' )\nY = parser.prepare_images_from_dir( 'train_images/' )\ntest_X = parser.prepare_images_from_dir( 'test_images/' , 'grayscale' )\n\nnp.save( 'sample_data/X.npy' , X )\nnp.save( 'sample_data/Y.npy' , Y )\nnp.save( 'sample_data/test_X.npy' , test_X )\nprint( 'data processed' )\n\nmodel.load_model( 'models/final_model.h5' )\n\n#model.fit( X , Y , number_of_epochs=100 )\n#model.save_model( 'models/model.h5')\n\nvalues = model.predict( test_X )\nvalues = np.maximum( values , 0 )\nfor i in range( test_X.shape[0] ):\n image_final = ( values[i] * 255).astype( np.uint8)\n imsave( 'predictions/{}.png'.format( i + 1 ) , image_final )\n\n","sub_path":"MainFile.py","file_name":"MainFile.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"617960704","text":"import sys\nimport tkinter as tk\nfrom tkinter import *\nimport urllib.request\nimport webbrowser\nfrom functools import partial\nfrom tkinter import Tk, StringVar , ttk\nfrom . import convert_temp\n\n\ndef TemperatureConverter():\n def convert():\n celTemp = celTempVar.get()\n fahTemp = fahTempVar.get()\n kelTemp = kelTempVar.get()\n\n if celTempVar.get() != 0.0:\n fahTempVar.set(convert_temp.convert_celToFah(celTemp))\n kelTempVar.set(convert_temp.covert_celToKel(celTemp))\n\n elif fahTempVar.get() != 0.0:\n celTempVar.set(convert_temp.convert_fahToCel(fahTemp))\n kelTempVar.set(convert_temp.convert_fahToKel(fahTemp))\n \n elif kelTempVar.get() !=0.0:\n celTempVar.set(convert_temp.convert_kelToCel(kelTemp))\n fahTempVar.set(convert_temp.convert_kelTofah(kelTemp))\n \n\n def reset():\n top = Toplevel(padx=50, pady=50)\n top.grid()\n message = Label(top, text = \"Reset Complete\")\n button = Button(top, text=\"OK\", command=top.destroy)\n\n message.grid(row = 0, padx = 5, pady = 5)\n button.grid(row = 1, ipadx = 10, ipady = 10, padx = 5, pady = 5)\n\n fahTempVar.set(int(0))\n celTempVar.set(int(0))\n kelTempVar.set(int(0))\n \n top = Toplevel()\n top.title(\"Temperature Converter\")\n \n celTempVar = IntVar()\n celTempVar.set(int(0))\n fahTempVar = IntVar()\n fahTempVar.set(int(0))\n kelTempVar = IntVar()\n kelTempVar.set(int(0))\n titleLabel = Label (top, text = \"Temperature Converter\", font = (\"Arial\", 12, \"bold\"), justify = CENTER).grid(column=1,row=1)\n \n\n celLabel = Label (top, text = \"Celcius: \", font = (\"Arial\", 16), fg = \"red\")\n celLabel.grid(row = 2, column = 1, pady = 10, sticky = NW)\n\n fahLabel = Label (top, text = \"Fahrenheit: \", font = (\"Arial\", 16), fg = \"blue\")\n fahLabel.grid(row = 3, column = 1, pady = 10, sticky = NW)\n \n kelLabel = Label (top, text = \"Kelvin: \", font = (\"Arial\", 16), fg = \"black\")\n kelLabel.grid(row = 4, column = 1, pady = 10, sticky = NW)\n\n celEntry = Entry (top, 
width = 10, bd = 5, textvariable = celTempVar)\n celEntry.grid(row = 2, column = 1, pady = 10, sticky = NW, padx = 125 )\n\n\n fahEntry = Entry (top, width = 10, bd = 5, textvariable = fahTempVar)\n fahEntry.grid(row = 3, column = 1, pady = 10, sticky = NW, padx = 125 )\n \n kelEntry = Entry (top, width = 10, bd = 5, textvariable = kelTempVar)\n kelEntry.grid(row = 4, column = 1, pady = 10, sticky = NW, padx = 125 )\n\n convertButton =Button (top, text = \"Convert\", font = (\"Arial\", 8, \"bold\"), relief = RAISED, bd=5, justify = CENTER, highlightbackground = \"red\", overrelief = GROOVE, activebackground = \"green\", activeforeground=\"blue\", command = convert)\n convertButton.grid(row = 5, column = 1, ipady = 8, ipadx = 12, pady = 5, sticky = NW, padx = 55)\n\n resetButton = Button (top, text = \"Reset\", font = (\"Arial\", 8, \"bold\"), relief = RAISED, bd=5, justify = CENTER, highlightbackground = \"red\", overrelief = GROOVE, activebackground = \"green\", activeforeground=\"blue\", command = reset)\n resetButton.grid(row = 5, column = 2,ipady = 8, ipadx = 12, pady = 5, sticky = NW)","sub_path":"src/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"611875683","text":"__author__ = 'ian'\n\nfrom django.conf.urls import patterns, include, url\n\nfrom abstracts import views\n\nurlpatterns = patterns('',\n # ex: /abstracts/\n url(r'^$', views.index, name='index'),\n url(r'journals$', views.get_all_journals, name='get_all_journals'),\n # ex: /abstracts/nature/abstracts\n\n # ?{\n url(r'^(?P[\\s\\S]+)/abstracts/$', views.get_all_abstracts_for_journal, name=\"abstracts_by_journal\")\n)\n\n\n","sub_path":"Python/AbstractsApp/abstracts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"617703714","text":"import pytest\nfrom app import ProcessPayment\nfrom providers import CheapPaymentGateway, PremiumPaymentGateway, ExpensivePaymentGateway\nimport requests\nimport json\n\n\nurl = 'http://127.0.0.1:5000/api/v1/payment'\n\nPAYMENT_PROCESSED = ({\"message:\" : \"Payment is processed.\"}, 200)\nINVALID_REQUEST = ({\"error:\" : [\"The request is invalid.\"]}, 400)\nINTERAL_SERVER_ERROR = ({\"error:\" : [\"An error occur while processing the request.\"]}, 500)\n\n@pytest.fixture(scope='module')\ndef request_data():\n payment_info = { \"creditCardNumber\": \"6069980060280276\",\n\t\t\t\t \"cardHolder\": 12222.5,\n\t\t\t\t \"expirationDate\": \"11/25\",\n\t\t\t\t \"securityCode\": \"234\",\n\t\t\t\t \"amount\": 501.7 }\n return payment_info\n\n\ndef test_invalid_credit_card_number(request_data):\n\trequest_data[\"creditCardNumber\"] = \"sadndnsad636363\"\n\tres_data = requests.post(url, data = request_data)\n\tassert 500 == res_data.status_code\n\n\ndef test_validation_credit_card_number(request_data):\n\trequest_data[\"creditCardNumber\"] = \"6069980060280276\"\n\tres_data = requests.post(url, data = request_data)\n\tassert 200 == res_data.status_code\n\tjson_response=json.dumps(res_data.json())\n\n\ndef test_validation_invalid_expiration_date(request_data):\n\trequest_data[\"expirationDate\"]= \"25/00\"\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\ndef test_validation_mandatory_data(request_data):\n\tdel request_data[\"expirationDate\"]\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\n\tdel request_data[\"creditCardNumber\"]\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\n\tdel request_data[\"amount\"]\n\tres_data = requests.post(url, data = 
request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\n\tdel request_data[\"cardHolder\"]\n\tres_data = requests.post(url, data = request_data)\n\tjson_response=json.dumps(res_data.json())\n\tassert 500 == res_data.status_code\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PaymentApp/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"567606631","text":"from flask import Flask, request, render_template\n\nfrom apis import cat_img_api, cat_fact_api, cat_video_api\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home_page():\n return render_template('index.html')\n\n\n@app.route('/get-cat')\ndef get_cat():\n category = request.args.get('category') #or 'space' # set a default\n\n cat_img_url = cat_img_api.get_cat(category)\n cat_fact = cat_fact_api.get_random_fact()\n cat_video = cat_video_api.cat_video(category)\n\n if cat_img_url and cat_fact and cat_video:\n return render_template('cat.html', cat_img=cat_img_url, category=category, cat_fact=cat_fact, cat_video=cat_video)\n else:\n return render_template('error.html')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"365641935","text":"import gym\nimport torch\nimport math\nimport time\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom csv import writer, reader\n\nimport torch.optim as optim\nimport torch.distributions.categorical as categorical\nfrom gym_miniworld.wrappers import *\nfrom A2CNN3 import *\nfrom rpm2 import rpm\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nclass Agent(object):\n def __init__(self, **kwargs):\n self.lr_act = 0.0009\n print(self.lr_act)\n self.lr_crit = 0\n self.batch_size = 64\n self.atoms = 80\n self.actions = 3\n self.channels = 9\n self.gamma = 0.65\n self.lambdaEntrop = 0.32\n print(self.lambdaEntrop)\n self.lambdaCrit = 0.41667\n self.weightDecay = False\n self.actor = CNNBase(self.channels, self.actions, self.atoms)\n self.optimizer_actor = optim.RMSprop(self.actor.parameters(), lr= self.lr_act, alpha=0.88, eps=1e-5)#, alpha= 0.99, eps=1e-5)#, weight_decay=self.weightDecay)\n self.memory = rpm(250000)\n self.maxReward = 0\n self.minFrame = 0\n self.AveRew = 0\n self.bestEps = 0\n self.ModUpdate = 0\n self.Good = False\n self.maxSteps = 360\n\n def get_action(self, state):\n with torch.no_grad():\n self.eval()\n state = state.to(dtype=torch.float, device=device)\n state = state.reshape([1] + list(state.shape))\n a, val = self.actor.act(state)\n\n log = 0.99\n return int (a), val, log\n\n def learn(self, frame):\n\n self.train()\n #ac_loss = 0\n _actor_loss = 0\n _critic_loss = 0\n Qval = 0\n Qvals = []\n #state_batch, action_batch, next_state_batch, reward_batch, log_batch, value_batch = self.memory.sample_spec(frame)\n state_batch, action_batch, next_state_batch, reward_batch, log_batch, value_batch, done_batch = self.memory.sample(frame)\n state_batch = state_batch.to(dtype=torch.float, device=device)\n action_batch = action_batch.to(dtype=torch.float, device=device)\n reward_batch = 
reward_batch.to(dtype=torch.float, device=device)\n next_state_batch = next_state_batch.to(dtype=torch.float, device=device)\n done_batch = done_batch.to(dtype=torch.float, device=device)\n #print(next_state_batch.size()) #[12,3,60,80]\n #print(\"Log\", log_batch.size()) #[12,1]\n\n #print(action_batch)\n vals, logs, entropy = self.actor.evaluate_actions(state_batch, action_batch)\n vals = vals.to(dtype=torch.float, device=device)\n entropy = entropy.to(dtype=torch.float, device=device)\n new_vals, _, _ = self.actor.evaluate_actions(next_state_batch, action_batch)\n new_vals = new_vals.to(dtype=torch.float, device=device)\n advantages = (reward_batch + (1-done_batch)*self.gamma*new_vals- vals).to(device)\n critic_loss = advantages.pow(2).mean()\n actor_loss = -(advantages.detach() * logs).mean()\n loss = (actor_loss+critic_loss*self.lambdaCrit -self.lambdaEntrop*entropy).to(device)\n #print(loss)\n self.optimizer_actor.zero_grad()\n\n # Calculate gradients\n loss.backward()\n #ac_loss.backward()\n # Apply gradients\n self.optimizer_actor.step()\n\n with torch.no_grad():\n #ac_loss = float(ac_loss)\n _actor_loss = float(actor_loss)\n _critic_loss = float(critic_loss)\n\n return loss, actor_loss, critic_loss, entropy\n\n def train(self):\n self.actor.train()\n\n def eval(self):\n self.actor.eval()\n\n def save_model(self):\n torch.save(self.actor.state_dict(),'A2C.pkl')\n #self.memory.save_ipt(path)\n\n def load_model(self, path):\n self.actor.load_state_dict(torch.load(path + 'A2C.pkl'))\n # self.memory.load_ipt(path)\n\n\n def step(self, steps, env, m_obs, i_episode):\n #print(\"steps\", steps) #250\n m_reward = [0 for _ in range(10)]\n m_action = [torch.FloatTensor([0]) for _ in range(10)]\n m_value = [torch.FloatTensor([0]) for _ in range(10)]\n m_log = [torch.FloatTensor([0]) for _ in range(10)]\n m_done = [torch.FloatTensor([0]) for _ in range(10)]\n state = [state_to(m_obs[-3:]) for _ in range(10)] # the last 3 items\n #print(\"state: \", type(state), 
len(state))\n _reward =[]\n done = False\n frame = 0\n batch_frame = 0\n while frame 0\n reward = sum(_reward)\n if important and not self.Good:\n for i in reversed(range(2,3)):\n gam = pow(self.gamma, i)\n rew = torch.FloatTensor([gam*m_reward[-1]])\n #print(str(-1-i))\n self.memory.push([state[-1-i], m_action[-i], state[-i], rew, m_log[-i], m_value[-i], m_done[-i]],\n important)\n #vf important = r>5\n self.memory.push([state[-2], m_action[-1], state[-1], m_reward[-1], m_log[-1], m_value[-1], m_done[-1]],\n important)\n #print(batch_frame)\n if batch_frame == self.batch_size:\n #print(\"Update time\")\n loss, aclos, critlos, entropy = self.learn(batch_frame)\n batch_frame = 1\n #do_print(loss, aclos, critlos, entropy)\n # if ((entropy < 0.25) and (reward > 1)) and i_episode>5:\n # #print(\"TEST\", reward, entropy)\n # #self.save_model('train/test/')\n # #self.test(m_obs, m_reward, m_log, m_value, m_action, state)\n # #self.Good = True\n # else:\n # self.Good = False\n\n # If done, batch data\n if done:\n obs = env.reset()\n if frame == steps:\n\n loss, aclos, critlos, entropy = self.learn(batch_frame)\n do_print(loss, aclos, critlos, entropy)\n\n #obs = env.reset()\n\n return reward, frame, loss, entropy\n\ndef np2torch(s):\n state = torch.from_numpy(s.copy())\n state.to(dtype=torch.float)\n #state = state.reshape([1] + list(state.shape))\n return state.to(dtype=torch.float, device=device)#, device=device)\n\ndef state_to(pov):\n state = torch.cat(pov, 2) #concatenates given sequence of tensors in given dimension\n state = state.permute(2, 0, 1) #permute dimensions of tensor\n return state.to(dtype=torch.float, device=device)#.to(torch.device('cpu'))\n\ndef do_print(loss, aclos, critlos, entropy):\n print('loss %2.7f acloss %2.7f critloss %2.7f entropy %2.7f' % \\\n (loss, aclos, critlos, entropy))\n\n\ndef envstep(env, action_num):\n reward = 0\n #print(action)\n obs, rew, done, info = env.step(action_num)\n #env.render('human')\n #rew = -0.01\n if rew>0:\n 
print(\"REWARD\")\n #rew = torch.LongTensor(rew)\n if done:\n done = 1\n else:\n done = 0\n return obs, rew, done, info, 1\n\ndef plotGraph(episodes, codeName, rew_all, Plotrew_all, list_lr, list_ac_loss, i_episode, entropy):\n plt.figure()\n plt.plot(episodes, rew_all, 'r--', episodes, list_lr, 'b.')\n plt.savefig('/home/anna/gym-miniworld/scripts/' + str(codeName) + 'A2C2Episode' + str(i_episode) + '.png')\n plt.close()\n plt.figure()\n plt.plot(episodes, Plotrew_all, 'r--', episodes, list_ac_loss, 'b--')\n plt.savefig('/home/anna/gym-miniworld/scripts/' + str(codeName) + 'A2C2Loss' + str(i_episode) + '.png')\n plt.close()\n\ndef read():\n with open ('A2CResults.csv', 'r') as f:\n Reader1 = reader(f, delimiter=',')\n Rows = list(Reader1)\n Tot_rows = len(Rows)\n return Tot_rows\n\ndef write(Agent1, cdName, AveRew, sum_episodes, tot_frame):\n with open('A2CResults.csv', 'a', newline='') as write_obj:\n csv_writer = writer(write_obj)\n csv_writer.writerow([str(cdName), \"AC2\", str(Agent1.lr_act), str(Agent1.lr_crit),\n str(Agent1.gamma),str(Agent1.lambdaCrit),str(Agent1.lambdaEntrop),\n str(Agent1.weightDecay), str(Agent1.maxReward),\n str(Agent1.minFrame), str(Agent1.bestEps),\n str(AveRew), str(tot_frame), str(Agent1.maxSteps), str(sum_episodes),\n str(Agent1.ModUpdate), str(Agent1.batch_size), str(Agent1.channels)])\n\ndef write_episode(_rew, frame, entropy):\n with open('A2C-EpisodeResults.csv', 'a', newline='') as write_obj:\n csv_writer = writer(write_obj)\n csv_writer.writerow([_rew, entropy])\n\ndef write_start():\n with open('A2C-EpisodeResults.csv', 'a', newline='') as write_obj:\n csv_writer = writer(write_obj)\n csv_writer.writerow([\"START\"])\n\ndef train(episode, env):\n\n Agent1 = Agent()\n Agent1.actor= Agent1.actor.to(device = device)\n write_start()\n #Agent1.load_model('train' + str(Agent1.tryNum) + '/')\n sum_episodes = episode\n rew_all = []\n Plotrew_all = []\n codeName = read()\n list_lr = []\n list_ac_loss = []\n list_crit_loss = []\n 
AveRew = 0\n #Agent1.save_model('train/')\n \n tot_rew = 0\n tot_frame = 0\n write(Agent1, codeName, AveRew, sum_episodes, tot_frame)\n for i_episode in range(sum_episodes):\n print(\"episode: \", i_episode)\n eps = i_episode\n obs = env.reset()\n #env.render('human')\n m_obs = [np2torch(obs) for _ in range(10)]\n _reward = []\n #Agent1.memory.load_ipt('train' + str(Agent1.tryNum) + '/')\n _rew, frame, ac_loss, entropy= Agent1.step(env.max_episode_steps, env, m_obs, i_episode)\n write_episode(_rew, frame, entropy)\n list_ac_loss.append(ac_loss)\n #list_crit_loss.append(crit_loss)\n list_lr.append(Agent1.lr_act)\n rew_all.append(_rew)\n tot_rew += _rew\n\n if _rew >Agent1.maxReward:\n Agent1.maxReward = _rew\n Agent1.minFrame = frame\n Agent1.bestEps = i_episode\n if entropy < 0.7:\n Agent1.save_model()\n if _rew>= 14:\n Agent1.lr_act = 1e-7\n tot_frame += frame\n Plottot_rew = _rew - 1\n Plotrew_all.append(Plottot_rew)\n\n if (i_episode % 100 == 0) and (i_episode != 0) or (i_episode == episode-1):\n episodes = range(0, i_episode+1)\n plotGraph(episodes, codeName, rew_all, Plotrew_all, list_lr, list_ac_loss, i_episode, entropy )\n AveRew = tot_rew / (eps+1)\n #Agent1.save_model('train/')\n #write(Agent1, codeName, AveRew, sum_episodes, tot_frame)\n\n print('epi %d frame %5d loss %2.5f entropy %2.5f reward %2.5f'%\\\n (i_episode, frame, ac_loss, entropy, _rew))\n\n\n AveRew = tot_rew / eps\n #Agent1.save_model('train/')\n write(Agent1, codeName, AveRew, sum_episodes, tot_frame)\n\n\n\nif __name__ == '__main__':\n print(\"Make environment\")\n env = gym.make('MiniWorld-OneRoom-v0')\n #env = RGBImgPartialObsWrapper(env)\n #env = ImgObsWrapper(env)\n #env.render('human')\n #env.framerate = 5\n done = False\n obs = env.reset()\n #a = float(sys.argv[1])\n env.seed(1000)\n #print(obs.shape())\n env.max_episode_steps =1000\n train(700, 
env)\n","sub_path":"A2C1.py","file_name":"A2C1.py","file_ext":"py","file_size_in_byte":12569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"356169213","text":"import theano\nfrom theano import tensor as T\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport struct\n#import load_mnist\n\ndef load_mnist(path, kind = 'train'):\n labels_path = os.path.join(path, '%s-labels-idx1-ubyte' %kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' %kind)\n\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))#8byte 읽어오기\n#struct.unpack(fmt, buffer):\n#Unpack from the buffer buffer (presumably packed by pack(fmt, ...)) \n#according to the format string fmt. The result is a tuple even if it contains exactly one item. \n#format에서:\n#> : Big-endian is an order in which the \"big end\" (most significant value in the sequence) \n#is stored first\n#I = unsigned integer\n\n labels = np.fromfile(lbpath, dtype = np.uint8)\n#Construct an array from data in a text or binary file.\n#A highly efficient way of reading binary data with a known data-type, \n#as well as parsing simply formatted text files. \n#Data written using the tofile method can be read using this function. 
\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\", imgpath.read(16))#16바이트 읽어오기\n \n images = np.fromfile(imgpath, dtype = np.uint8).reshape(len(labels), 784)\n \n return images, labels\n \n \n \nX_train, y_train = load_mnist('mnist', kind='train')\nX_test, y_test = load_mnist('mnist', kind='t10k')\n\n#print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))\n#print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1])) \n\ntheano.config.floatX = 'float32'\n\nX_train = X_train.astype(theano.config.floatX)\nX_test = X_test.astype(theano.config.floatX)\n\nfrom keras.utils import np_utils\n#print('First 3 labels: ', y_train[:3])\n\ny_train_ohe = np_utils.to_categorical(y_train)\n#print('\\nFirst 3 labels (one-hot):\\n', y_train_ohe[:3])\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD\n\nnp.random.seed(1)\n\nmodel = Sequential()\nmodel.add(Dense(input_dim=X_train.shape[1], output_dim=50, init='uniform', activation='tanh'))\nmodel.add(Dense(input_dim=50, output_dim=50, init='uniform', activation='tanh'))\nmodel.add(Dense(input_dim=50, output_dim=y_train_ohe.shape[1], init='uniform', activation='softmax'))\n\nsgd = SGD(lr=0.001, decay=1e-7, momentum=0.9)\nmodel.compile(optimizer=sgd, loss='categorical_crossentropy', metrics = ['accuracy'])\nmodel.fit(X_train, y_train_ohe, nb_epoch=1000, batch_size=15, verbose=1, validation_split=0.1)\n#이게 batch size가 15이상 올라가면 안돼고 계속 멈췄다....하 이 것 때문에 5시간 넘게 뻘 짓을...\n\ny_train_pred = model.predict_classes(X_train, verbose=0)\n\nprint('First 3 predictions: ', y_train_pred[:3])\n\ntrain_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]\nprint('Training accuracy: %.2f%%' % (train_acc * 100))\n\ny_test_pred = model.predict_classes(X_test, verbose=0)\ntest_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]\nprint('Test accuracy: %.2f%%' % (test_acc * 
100))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"chapter13/chapter13_ex4.py","file_name":"chapter13_ex4.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"308535463","text":"import pickle\nimport os\nfrom Error import *\nfrom auth import *\nfrom auth_admin import *\nfrom product import *\nfrom check import *\nimport base64\nimport random\nfrom cart import *\n#---------------------------------------------------------------------------------------------------------------------------\n# if you run this file all the data will be intialled\n#---------------------------------------------------------------------------------------------------------------------\n\n\ndef initial():\n data=[]\n save_adminDB(data)\n save_cartDB(data)\n save_chatDB(data)\n save_productDB(data)\n save_recordDB(data)\n save_searchDB(data)\n save_UserDB(data)\n\n# if you run this function, the userDB will refresh, and will only contain the following data \ndef put_data_into_user():\n #refresh userDB to []\n data=[]\n save_UserDB(data)\n\n #add data\n reed=user_register('z5221388@ad.unsw.edu.au',123,'reed')[\"token\"]\n alex=user_register('z5190777@ad.unsw.edu.au',123,'alex')[\"token\"]\n leslie=user_register('z5212833@ad.unsw.edu.au',123,'leslie')[\"token\"]\n dylan=user_register('z5157999@ad.unsw.edu.au',123,'dylan')[\"token\"]\n tony=user_register('z5238695@ad.unsw.edu.au',123,'tony')[\"token\"]\n robot=user_register('z5555555@ad.unsw.edu.au',123,'robot')[\"token\"]\n user_logout(reed)\n user_logout(alex)\n user_logout(leslie)\n user_logout(dylan)\n user_logout(tony)\n user_logout(robot)\n print(\"Put data into user finish\")\n\n\n# if you run this function, the userDB will refresh, and will only contain the following data \ndef put_data_into_admin():\n #refresh adminDB to []\n data=[]\n save_adminDB(data)\n\n #add data\n admin=admin_add('5hd@ad.unsw.edu.au',123,'5hd')\n print(\"Put data into admin finish\")\n\ndef put_data_into_product():\n #refresh productDB to []\n data=[]\n save_productDB(data)\n\n adminDB=load_adminDB()\n \n if isLoggedin(adminDB[0]['email'])!=1:\n admin_login(adminDB[0]['email'], 123)\n\n 
token=adminDB[0]['token']\n \n '''\n the following is for category 0, electronic add_product(token, category_id, name, detail, price, stock, tag)\n '''\n #phone\n add_product(token,0,'Apple iPhone 12 128GB (Black)', '6.1-inch Super Retina XDR display* Ceramic Shield, tougher than any smartphone glass 5G for superfast downloads and high-quality streaming*',1429, 10, 'black iphone12 128G','','')\n add_product(token,0,'Apple iPhone 12 128GB (Blue)', '6.1-inch Super Retina XDR display* Ceramic Shield, tougher than any smartphone glass 5G for superfast downloads and high-quality streaming*',1429, 10, 'blue iphone12 128G','','')\n add_product(token,0,'Apple iPhone 12 128GB (Green)', '6.1-inch Super Retina XDR display* Ceramic Shield, tougher than any smartphone glass 5G for superfast downloads and high-quality streaming*',1429, 10, 'green iphone12 128G','','')\n add_product(token,0,'Apple iPhone 12 128GB (White)', '6.1-inch Super Retina XDR display* Ceramic Shield, tougher than any smartphone glass 5G for superfast downloads and high-quality streaming*',1429, 10, 'white iphone12 128G','','')\n\n #switch\n add_product(token,0,'Nintendo Switch Console Neon', 'Buy the Nintendo Switch Neon Console online today and experience a full home video game console experience anytime, anywhere!',399, 10, 'red blue switch','','')\n add_product(token,0,'Nintendo Switch Console Mario Red & Blue Edition', 'Nintendo Switch – Mario Red & Blue Edition, with a distinct red-and-blue colour scheme in honour of Mario’s iconic outfit.',449, 10, 'Mario Red Blue Edition switch','','')\n add_product(token,0,'Nintendo Switch Console Grey', 'Buy the Nintendo Switch Grey Console online today and experience a full home video game console experience anytime, anywhere!',399, 10, 'gray switch','','')\n\n #macbook\n add_product(token,0,'Apple MacBook Air 13-inch Space Grey', 'Apple-designed M1 chip for a giant leap in CPU, GPU and machine learning performance Go longer than ever with up to 18 hours of 
battery life* 8-core CPU delivers up to 3.5x faster performance, to tackle projects faster than ever*',1599, 10, 'laptop macbook air space gray','','')\n add_product(token,0,'Apple MacBook Pro 13-inch Space Grey', 'Apple-designed M1 chip for a giant leap in CPU, GPU and machine learning performance Go longer than ever with up to 18 hours of battery life* 8-core CPU delivers up to 3.5x faster performance, to tackle projects faster than ever*',1999, 10, 'laptop macbook pro space gray','','')\n add_product(token,0,'Apple MacBook Air 13-inch Silver', 'Apple-designed M1 chip for a giant leap in CPU, GPU and machine learning performance Go longer than ever with up to 18 hours of battery life* 8-core CPU delivers up to 3.5x faster performance, to tackle projects faster than ever*',1999, 10, 'laptop macbook air silver','','')\n\n '''\n the following is for category 1, book\n '''\n add_product(token,1,'hunger game 1', 'a famous book which talk about the adventure of a girl',50, 100, 'science adventure youth fiction','','')\n add_product(token,1,'hunger game 2', 'a famous book which talk about the adventure of a girl',50, 100, 'science adventure youth fiction','','')\n add_product(token,1,'hunger game 3', 'a famous book which talk about the adventure of a girl',50, 100, 'science adventure youth fiction','','')\n\n add_product(token,1,'Harry Potter 1', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter 2', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter 3', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n\n add_product(token,1,'Hobbits 1', 'The story narrated a sorcerer leads the story which 13 dwarves and east a Huo bit person treasurees hunt',69, 100, 'magic war fiction','','')\n add_product(token,1,'Hobbits 2', 'The story narrated a sorcerer leads the story which 13 dwarves and east a Huo bit person treasurees hunt',69, 
100, 'magic war fiction','','')\n \n add_product(token,1,'Romeo and Juliet', 'a story about love',80, 100, 'love romantic youth fiction','','')\n add_product(token,1,'Jane Eyre', 'Jane Eyre ranks as one of the greatest and most perennially popular works of English fiction',85, 100, 'love realism fiction','','')\n\n \n '''\n the following is for category 2, sport\n '''\n add_product(token,2,'air jordan 34', 'a shoes which is belong to nike, air jordan series', 1200, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 1', 'a shoes which is belong to nike, air jordan series', 1100, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 32', 'a shoes which is belong to nike, air jordan series', 800, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 4', 'a shoes which is belong to nike, air jordan series', 500, 30, 'shoes nike AJ','','')\n\n add_product(token,2,'baseball cap', 'a cap for baseball', 40, 30, 'baseball cap','','')\n add_product(token,2,'baseball bat', 'a bat for baseball', 80, 30, 'baseball bat','','')\n\n add_product(token,2,'hair band', 'band for sport', 70, 30, 'nike band','','')\n add_product(token,2,'tennis racket', 'racket for tennis', 76, 30, 'tennis racket','','')\n add_product(token,2,'tennis shoes', 'shoes for tennis', 60, 30, 'tennis shoes','','')\n add_product(token,2,'badminton racket', 'racket for badminton', 90, 30, 'badminton racket','','')\n\n\n\n '''\n the following is for category 3, clothes\n '''\n add_product(token,3,'Balenciaga T shirt', 'a white extravagant t-shirt', 5000, 30, 'Balenciaga T-shirt black','','' )\n add_product(token,3,'Balenciaga sweater', 'a black sweater', 5200, 30, 'Balenciaga sweater black','','' )\n\n\n add_product(token,3,'lv fleece', 'a white fleece', 4000, 30, 'lv fleece white','','' )\n add_product(token,3,'lv trousers', 'a white trousers', 3400, 30, 'lv trousers','','' )\n\n add_product(token,3,'burberry fleece', 'a brown fleece', 8000, 30, 'burberry fleece brown','','' )\n 
add_product(token,3,'burberry shirt', 'a blue shirt', 3400, 30, 'burberry blue shirt','','')\n\n add_product(token,3,'gucci sweater', 'a blue sweater', 9100, 30, 'gucci sweater','','')\n add_product(token,3,'gucci jeans', 'a blue jeans', 9100, 30, 'gucci jeans','','')\n\n add_product(token,3,'nike shorts', 'a black shorts', 900, 30, 'nike shorts','','')\n add_product(token,3,'nike shirt', 'a black shirt', 900, 30, 'nike shirt','','')\n \n '''\n the following is for category 4, home\n '''\n add_product(token,4,'desk', 'a normal desk fro working', 323, 30, 'black medium desk','','' )\n add_product(token,4,'table', 'a normal table for chatting', 360, 30, 'white medium table','','' )\n add_product(token,4,'chair', 'a normal chair', 400, 30, 'chair','','' )\n add_product(token,4,'cabinet', 'a normal cabinet for storing', 500, 30, 'black cabinet','','' )\n add_product(token,4,'sofa', 'a sofa', 3000, 30, 'sofa','','' )\n add_product(token,4,'table lamp', 'a table lamp', 200, 30, 'table lamp','','' )\n add_product(token,4,'refrigerator ', 'a refrigerator ', 700, 30, 'refrigerator','','' )\n add_product(token,4,'quilt', 'a quilt', 300, 30, 'quilt','','' )\n add_product(token,4,'pillow', 'a pillow', 7000, 30, 'pillow','','' )\n add_product(token,4,'washing machine', 'a washing machine', 600, 30, 'washing machine','','' )\n\n '''\n the following is for category 5, toy\n '''\n add_product(token,5,'LEGO JEEP', 'a model for JEEP', 3000, 30, 'model splicing car','','' )\n add_product(token,5,'LEGO benz', 'a model for benz', 3300, 30, 'model splicing car','','' )\n add_product(token,5,'LEGO ship', 'a model for ship', 2000, 30, 'model splicing ship','','' )\n\n add_product(token,5,'telecontrolled car', 'a telecontrolled car', 5000, 30, 'telecontrolled car','','' )\n add_product(token,5,'telecontrolled plane', 'a telecontrolled plane', 5500, 30, 'telecontrolled plane','','' )\n add_product(token,5,'telecontrolled ship', 'a telecontrolled ship', 4000, 30, 'telecontrolled ship','','' 
)\n\n\n add_product(token,5,'plush toy', 'plush toy', 4000, 30, 'plush toy','','' )\n add_product(token,5,'sliding plate', 'a sliding plate', 4000, 30, 'sliding plate','','' )\n add_product(token,5,'female barbie doll ', 'a female barbie doll', 4000, 30, 'female barbie doll','','' )\n add_product(token,5,'male barbie doll ', 'a male barbie doll', 4000, 30, 'male barbie doll','','' )\n\n # id here start with 60, take care when you give name to the photo\n\n '''\n the following is for category 0, electronic add_product(token, category_id, name, detail, price, stock, tag)\n '''\n add_product(token,0,'Samsung Galaxy 20', 'The 4500mAh (typical) battery gives your phone the juice it needs to outlast your day and power for when you really need it',12499, 30,'samsung phone','','')\n add_product(token,0,'Samsung Galaxy Z Fold2 5G', '7.6-inch Tablet-like Display and Full-viewing Cover Screen',2499, 30, 'samsung phone','','')\n add_product(token,0,'Samsung Galaxy S21+', 'Pro-grade Camera and Intelligent Infinity-O Display',1299, 20,'samsung phone','','')\n add_product(token,0,'HUAWEI P40 Pro 5G','Ultra Vision Leica Quad Camera, VIP Service - Deep Sea Blue',1188,30,'huawei phone','','')\n\n add_product(token,0,'HUAWEI MATE 30 PRO','DUAL-SIM LIO-L29',899,30,'huawei phone','','')\n add_product(token,0,'Huawei P30 Pro','Dual Sim 40MP 8GB 256GB Mobile Phone',998,30,'huawei phone','','')\n\n add_product(token,0,'Surface Laptop 3','13.5“,Sandstone(Metal),Intel Core i7',2099,30,'Surface laptop','','')\n add_product(token,0,'Surface Pro 7','Platinum,Intel Core i7',2099,30,'Surface laptop','','')\n add_product(token,0,'Microsoft Surface Laptop 3','13.5“,128GB i5 Platinum',1298,30,'Surface laptop','','')\n\n '''\n the following is for category 1, book\n '''\n add_product(token,1,'Harry Potter and the Goblet of Fire', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter and the Order of the Phoenix', 'a story about magic 
world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter and the Half-Blood Prince', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n add_product(token,1,'Harry Potter and the Deathly Hallows', 'a story about magic world',80, 100, 'magic adventure youth fiction','','')\n\n add_product(token,1,'Wuthering Heights', 'an 1847 novel by Emily Bronte',80, 100, 'romantic realism fiction','','')\n add_product(token,1,'War and Peace', 'a novel by the Russian author Leo Tolstoy',80, 100, 'historical war fiction','','')\n add_product(token,1,'The Great Gatsby', 'a 1925 novel by American writer F. Scott Fitzgerald',80, 100, 'romantic love american-dream fiction','','')\n add_product(token,1,'Norwegian Wood', 'a 1987 novel by Japanese author Haruki Murakami',80, 100, 'nostalgic love fiction','','')\n add_product(token,1,'The lady of the camellias', 'a novel by Alexandre Dumas fils,',80, 100, 'romantic tragedy fiction','','')\n add_product(token,1,'The Hunchback of Notre-Dame', ' a French Gothic novel by Victor Hugo,',80, 100, 'gothic religion love fiction','','')\n\n\n '''\n the following is for category 2, sport\n '''\n add_product(token,2,'air jordan 1 Travis Scott ', 'a shoes which is belong to nike, air jordan series', 4200, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 6', 'a shoes which is belong to nike, air jordan series', 1100, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 32 Golden Harvest', 'a shoes which is belong to nike, air jordan series', 800, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 4 Linen', 'a shoes which is belong to nike, air jordan series', 500, 30, 'shoes nike AJ','','')\n add_product(token,2,'air jordan 11', 'a shoes which is belong to nike, air jordan series', 500, 30, 'shoes nike AJ','','')\n\n add_product(token,2,'NYC Baseball Cap', 'a cap for baseball', 40, 30, 'baseball cap','','')\n add_product(token,2,'Brooklyn Basher Baseball Bat', 'a 
bat for baseball', 80, 30, 'baseball bat','','')\n\n add_product(token,2,'Tennis Ball', 'Competition used tennis ball ', 76, 30, 'tennis ball','','')\n add_product(token,2,'Wilson Pro Tennis Racket', 'racket for tennis', 76, 30, 'tennis racket','','')\n add_product(token,2,'Adidas tennis shoes', 'shoes for tennis', 60, 30, 'tennis shoes','','')\n add_product(token,2,'Lining badminton racket', 'racket for badminton', 90, 30, 'badminton racket','','')\n '''\n the following is for category 3, clothes\n '''\n add_product(token,3,'Balenciaga T shirt', 'a beige extravagant t-shirt', 5000, 30, 'Balenciaga T-shirt beige','','' )\n add_product(token,3,'Balenciaga Hoodie', 'a black sweater', 5200, 30, 'Balenciaga Hoodie black','','' )\n\n\n add_product(token,3,'lv jacket', 'a black fleece with LV logo', 4000, 30, 'lv black jackte','','' )\n add_product(token,3,'lv logo pants ', 'a black shiny pants', 3400, 30, 'lv black pants','','' )\n\n add_product(token,3,'burberry jacket', 'a brown fleece', 8000, 30, 'burberry jacket brown','','' )\n add_product(token,3,'burberry plaid shirt', 'a signature beige plaid shirt', 3400, 30, 'burberry plaid shirt','','')\n\n add_product(token,3,'gucci cardigan', 'a blue&brown cardigan', 9100, 30, 'gucci brown blue cardigan','','')\n add_product(token,3,'gucci jeans skinny', 'a blue jeans', 9100, 30, 'gucci jeans skinny','','')\n\n add_product(token,3,'nike swoosh shorts ', 'a black shorts with swoosh', 900, 30, 'nike shorts black','','')\n add_product(token,3,'nike swoosh T-shirt', 'a black t-shirt', 900, 30, 'nike t-shirt black','','')\n '''\n the following is for category 4, home\n '''\n add_product(token,4,'Wood Desk', 'a normal wood desk fro working', 323, 30, 'wooden brown desk','','' )\n add_product(token,4,'Wooden table', 'a normal table for studying', 360, 30, 'wooden brown table','','' )\n add_product(token,4,'wooden chair', 'a normal chair', 400, 30, 'wooden chair','','' )\n add_product(token,4,'wooden cabinet', 'a normal cabinet for 
storing', 500, 30, 'wooden brown cabinet','','' )\n add_product(token,4,'Double cloth Sofa', 'a sofa', 3000, 30, 'Cloth sofa','','' )\n add_product(token,4,'pink table lamp', 'a table lamp', 200, 30, 'pink table lamp','','' )\n add_product(token,4,'Hisense refrigerator ', 'a refrigerator ', 700, 30, 'silver refrigerator','','' )\n add_product(token,4,'wool quilt', 'a quilt', 300, 30, 'wool quilt','','' )\n add_product(token,4,'Haier washing machine', 'a washing machine', 600, 30, 'white washing machine','','' )\n '''\n the following is for category 5, toy\n '''\n add_product(token,5,'Lego Harry Potter Castle','71043 Castle Model Building Kit with Harry Potter Figures',519,30,'Lego Model Castle','','')\n add_product(token,5,'Jellycat Bunny','Jellycat Small Bashful Bunny',200,30,'Plush toy ','','')\n add_product(token,5,'JellyCat Dragon','Bashful Dragon medium 31cm soft toy',519,30,'plush soft toy','','')\n add_product(token,5,'JellyCat Curvie Pig',' super fluffy lovable look pig',519,30,'plush soft toy','','')\n add_product(token,5,'JellyCat Amuseable Cloud','dreamy companion cutie cloud',519,30,'plush soft toy','','')\n add_product(token,5,'JellyCat bag','Amusable Pineapple woven bag 33cm',200,30,'plush soft toy bag','','')\n\n\n add_product(token,5,'JellyCat Pear bag','Amusable Pear plushn cross body bag',519,30,'plush soft toy bag','','')\n\n\n add_product(token,5,'Lego Technic Porche','Porche 911 RSR 42096 Building Kit',1000,30,'lego model splicing car','','')\n add_product(token,5,'Lego Star Wars','The Rise of Skywalker Millennium Falcon 75257 Building Kit',1000,30,'lego model splicing ship','','')\n admin_logout(adminDB[0]['token'])\n print(\"put data into product finish\")\n\n\ndef add_photo():\n adminDB=load_adminDB()\n productDB=load_productDB()\n if isLoggedin(adminDB[0]['email'])!=1:\n admin_login(adminDB[0]['email'], 123)\n\n token=adminDB[0]['token']\n\n #this is for add first img\n for i in range(0,len(productDB)):\n 
url='photo/'+str(i)+'-'+'0'+'.png'\n if os.path.exists(url):\n base_64=change_iml_to_base64(url)\n base_64='data:image/png;base64,'+str(base_64)\n update_first_photo(token,i,base_64)\n #this is for add second img\n for i in range(0,len(productDB)):\n url='photo/'+str(i)+'-'+'1'+'.png'\n if os.path.exists(url):\n base_64=change_iml_to_base64(url)\n base_64='data:image/png;base64,'+str(base_64)\n update_second_photo(token,i,base_64)\n\n admin_logout(adminDB[0]['token'])\n print(\"add photo finish\")\n\ndef add_pro():\n token=user_login('z5555555@ad.unsw.edu.au',123)['token']\n productDB=load_productDB()\n id=[]\n for i in productDB:\n id.append(i['id'])\n length=len(productDB)\n l=random.sample(id, length)\n\n j=0\n for i in l:\n j+=1\n add_product_to_cart(token,i)\n if j%10==0 or j==length:\n purchase_product(token)\n user_logout(token)\n print(\"add record\")\ndef change_iml_to_base64(url):\n f=open(url,'rb')#第一个参数图像路径\n ls_f=base64.b64encode(f.read())\n ls_f = ls_f.decode(\"utf-8\")\n f.close()\n return ls_f\n\n\nif __name__ == '__main__':\n initial()\n\n put_data_into_admin()\n\n put_data_into_user()\n\n put_data_into_product()\n\n add_pro()\n\n add_photo()\n\n \n \n \n\n \n \n \n","sub_path":"RecommendWebsite/reed/put_data.py","file_name":"put_data.py","file_ext":"py","file_size_in_byte":19647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"456968908","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\nimport sys\n\nclass myContentHandler(ContentHandler):\n\n def __init__ (self):\n self.inItem = False\n self.inContent = False\n self.Content = \"\"\n self.Html = \"\"\n self.Title = \"\"\n\n def startElement (self, name, attrs):\n if name == 'item':\n self.inItem = True\n elif self.inItem:\n if name == 'title' or name == 'link':\n self.inContent = True\n \n def endElement (self, name):\n if name == 'item':\n self.inItem = False\n elif self.inItem:\n if name == 'title':\n self.Title += self.Content\n # To avoid Unicode trouble\n self.inContent = False\n self.Content = \"\"\n elif name == 'link':\n self.Html += (\"\"\n + self.Title + \"
\\n\")\n self.inContent = False\n self.Content = \"\"\n self.Title = \"\"\n\n def characters (self, chars):\n if self.inContent:\n self.Content = self.Content + chars\n \n# --- Main prog\ndef getNews(): \n # Load parser and driver\n\n theParser = make_parser()\n theHandler = myContentHandler()\n theParser.setContentHandler(theHandler)\n\n # Ready, set, go!\n\n theParser.parse(\"http://barrapunto.com/index.rss\")\n return \"
News:
\" + theHandler.Html","sub_path":"cms/xmlparser.py","file_name":"xmlparser.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"29554904","text":"#!/usr/bin/env python3\n# session analysis\nfrom collections import defaultdict\nfrom tqdm import tqdm\nfrom common import get_t\nimport os\nimport argparse\nimport time\n\ndef read_file(file_name, out_dir=None):\n user_events = defaultdict(list)\n items_view_freq = defaultdict(int)\n items_purchase_freq = defaultdict(int)\n item_list = set()\n behavior_list = set()\n with open(file_name, 'r') as in_f:\n for line in tqdm(in_f):\n ad_id, item_id, behavior, ts = line.strip().split('\\t')\n if item_id:\n item_id = item_id.replace(' ', '')\n user_events[ad_id].append((item_id, behavior, int(ts)))\n if behavior == 'ViewContent':\n items_view_freq[item_id] += 1\n elif behavior == 'revenue':\n items_purchase_freq[item_id] += 1\n item_list.add(item_id)\n behavior_list.add(behavior + ':' + item_id)\n if out_dir:\n with open(f'{out_dir}/items_view_freq.csv', 'w') as f:\n [print(f'{key}\\t{value}', file=f) for key, value in sorted(items_view_freq.items(), key=lambda item: item[1], reverse=True)]\n with open(f'{out_dir}/items_purchase_freq.csv', 'w') as f:\n [print(f'{key}\\t{value}', file=f) for key, value in sorted(items_purchase_freq.items(), key=lambda item: item[1], reverse=True)]\n\n with open(f'{out_dir}/user_idx.csv', 'w') as f:\n [print(f'{key}\\t{idx}', file=f) for idx, key in enumerate(list(user_events.keys()))]\n with open(f'{out_dir}/item_idx.csv', 'w') as f:\n [print(f'{key}\\t{idx}', file=f) for idx, key in enumerate(list(item_list))]\n with open(f'{out_dir}/behavior_idx.csv', 'w') as f:\n [print(f'{key}\\t{idx}', file=f) for idx, key in enumerate(list(behavior_list))]\n return user_events\n\n# session_period : sec\ndef session_process(tmp_user_events, session_period=None, last_N=10):\n user_sessions = defaultdict(list)\n user_last_N_events = defaultdict(list)\n user_events = defaultdict(list)\n for ad_id in tqdm(tmp_user_events):\n if len(tmp_user_events[ad_id]) > 5:\n user_events[ad_id] = tmp_user_events[ad_id]\n\n if 
session_period:\n for ad_id in tqdm(user_events):\n tmp = []\n user_events[ad_id].sort(key = lambda x:x[-1])\n user_last_N_events[ad_id] = user_events[ad_id][-last_N:]\n for event in user_events[ad_id]:\n if len(tmp) == 0:\n tmp.append(event)\n else:\n if event[-1] - tmp[-1][-1] < session_period:\n tmp.append(event)\n else:\n user_sessions[ad_id].append(tmp)\n tmp = [event]\n if len(tmp) > 0:\n user_sessions[ad_id].append(tmp)\n\n else:\n for ad_id in tqdm(user_events):\n user_events[ad_id].sort(key = lambda x:x[-1])\n user_last_N_events[ad_id] = user_events[ad_id][-last_N:]\n user_sessions[ad_id].append(user_events[ad_id])\n\n return user_sessions, user_last_N_events\n\n\ndef user_events_session_statistic(user_event_session):\n user_count = len(user_event_session)\n sessions_count, session_length = 0, 0\n for idx, user in tqdm(enumerate(user_event_session)):\n sessions = user_event_session[user]\n if len(sessions) < 2:\n continue\n # sessions_count += len(sessions)\n for session in sessions:\n session_length += len(session)\n sessions_count += 1\n if idx < 1000:\n print(f\"{user} ---> {user_event_session[user]}\")\n print(f'user_count:{user_count}, sessions_count:{sessions_count}, avg_session_length:{session_length/sessions_count}')\n\n\ndef save_test_file_new(user_event_session , file_name, last_N=10):\n with open(file_name, 'w') as out_f:\n for user in tqdm(user_event_session):\n sessions = user_event_session[user]\n if len(sessions) < 2:\n continue\n \n for idx in range(len(sessions)-1):\n if len(sessions[idx]) >= last_N and len(sessions[idx+1]) >= 5:\n history_events = [f'{s[1]}:{s[0]}' for s in sessions[idx]]\n predict_events = [f'{s[1]}:{s[0]}' for s in sessions[idx+1]]\n if set(history_events) != set(predict_events):\n print(f\"{'#'.join(history_events)}\\t{'#'.join(predict_events)}\", file=out_f)\n\n \ndef save_user_event_seqence(user_event_session, file_name):\n \"\"\"\n - train seqence\n - test prefix 10 seqence\n \"\"\"\n with open(file_name, 'w') as 
out_f: \n for user in tqdm(user_event_session):\n for session in user_event_session[user]:\n if len(session) < 3: continue\n events = [f'{s[1]}:{s[0]}' for s in session]\n print(' '.join(events), file=out_f)\n \n\n\ndef save_test_file(user_event_session, user_last_N_events, file_name):\n \"\"\"\n ev1, ev2, ev3, ev4, ev5 ......\n ev6, ev7, ev8, ev9, ......\n \"\"\"\n with open(file_name, 'w') as out_f: \n for user in tqdm(user_event_session):\n if user in user_last_N_events:\n print('#'.join([f'{s[1]}:{s[0]}' for s in user_last_N_events[user]]), end='\\t', file=out_f)\n else:\n print('', end='\\t', file=out_f)\n\n for session in user_event_session[user]:\n events = [f'{s[1]}:{s[0]}' for s in session]\n print('#'.join(events), file=out_f)\n \n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"python3 0_raw_data_handler.py\")\n parser.add_argument(\"date\", type=str, help=\"date\")\n parser.add_argument(\"output_dir\", type=str, help=\"output file foler\")\n parser.add_argument(\"--session_period\", type=int, default=None, help=\"how long would consider the new session (sec)\")\n parser.add_argument(\"--last_N\", type=int, default=10, help=\"How many reference events when testing\")\n args = parser.parse_args()\n tr_data = f\"data/{args.date}/tr_data/merged.data\"\n te_data = f\"../data/{args.date}/te_data/merged.data\"\n #te_data = f\"te_sample.data\"\n # train\n print(f\"[{get_t()}] reading train data events\")\n #events = read_file(tr_data, args.output_dir)\n print(f\"[{get_t()}] train data session_process\")\n #sessions, last_N_events = session_process(events, session_period=args.session_period, last_N=args.last_N)\n print(f\"[{get_t()}] train data session_statistic\")\n #user_events_session_statistic(sessions)\n print(f\"[{get_t()}] train data save file\")\n #save_user_event_seqence(sessions, os.path.join(args.output_dir, 'tr_data.csv'))\n # release memory\n events, sessions = None, None\n #time.sleep(15)\n\n # test\n print(f\"[{get_t()}] 
reading test data events\")\n events = read_file(te_data)\n print(f\"[{get_t()}] test data session_process\")\n sessions, _ = session_process(events, session_period=args.session_period, last_N=args.last_N)\n print(f\"[{get_t()}] test data session_statistic\")\n user_events_session_statistic(sessions)\n save_test_file_new(sessions, f'test.sample.{args.session_period}.csv', last_N=args.last_N)\n\n # print(f\"[{get_t()}] reading sample data events\")\n # sample_events = read_file('data/sample.csv', 'data')\n # sample_sessions, sample_last_N_events = session_process(sample_events, session_period=None)\n # user_events_session_statistic(sample_sessions)\n # save_user_event_seqence(sample_sessions, 'tr_data.csv')\n # save_test_file(sample_sessions, sample_last_N_events, 'te_data.csv')\n # # for u in sample_sessions:\n # # print(sample_sessions[u])\n","sub_path":"data_process/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":7742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"552984673","text":"# Python standard library modules\nimport asyncio\nimport bisect\nimport io\nimport json\nimport logging\nimport random\nimport sys\nimport time\nfrom urllib.parse import splittype, urljoin\n\n# Third-party modules\nimport aiohttp\nimport click\nimport coloredlogs\nimport jwt\nimport websockets\nfrom jsonrpcserver import method, async_dispatch as dispatch\nfrom jsonrpcserver.response import DictResponse\n\n\nRECSYS_NAME = 'baseline'\n\nENVVAR_PREFIX = 'RENEWAL'\n\nRENEWAL_API_BASE_URI = 'https://api.renewal-research.com/v1'\n\nINITIAL_ARTICLES = 1000\n\"\"\"Number of articles to initialize the in-memory article cache with.\"\"\"\n\nMAX_ARTICLES = 10000\n\"\"\"Maximum number of articles to keep cached in memory.\"\"\"\n\nRECOMMEND_DEFAULT_LIMIT = 30\n\"\"\"Max number of recommendations to return by default.\"\"\"\n\n\nlog = logging.getLogger(RECSYS_NAME)\n# Log all uncaught exceptions\nsys.excepthook = lambda *exc_info: log.exception(\n 'an uncaught exception occurred', exc_info=exc_info)\n\narticles = None\n\"\"\"Articles cache; initialized in `initialize`.\"\"\"\n\n\nasync def initialize(api_base_uri, token):\n \"\"\"Start-up tasks to perform before starting the main client loop.\"\"\"\n\n global articles\n\n log.info(f'initializing articles cache with {INITIAL_ARTICLES} articles')\n headers = {'Authorization': 'Bearer ' + token}\n async with aiohttp.ClientSession(\n headers=headers, raise_for_status=True) as session:\n async with session.get(urljoin(api_base_uri, 'articles'),\n params={'limit': INITIAL_ARTICLES}) as resp:\n articles = ArticleCollection(await resp.json())\n log.debug(f'cached {len(articles)} articles')\n\n\n# RPC methods\n# WARNING: Don't forget to make these functions async even if they\n# don't use await, otherwise the async_dispatch gets confused.\n\n@method\nasync def ping():\n return 'pong'\n\n\n@method\nasync def new_article(article):\n articles.push(article)\n\n\n@method\nasync def recommend(user_id, 
limit=RECOMMEND_DEFAULT_LIMIT, since_id=None,\n max_id=None):\n \"\"\"Return recommendations for the specified user and article ID range.\"\"\"\n\n # Currently just supports the 'random' strategy: Take a random selection\n # of up to limit articles from the given range.\n if since_id is None:\n # If no since_id is given (i.e. we are being asked for the most recent\n # articles, just take the top `limit * 2` articles and then take a\n # random selection from them\n start = -2 * limit\n else:\n start = since_id + 1\n end = max_id\n selection = articles[start:end]\n limit = min(limit, len(selection))\n sample = sorted(random.sample(range(len(selection)), limit), reverse=True)\n return [selection[idx]['article_id'] for idx in sample]\n\n\n# websocket server loops\n\nasync def request_loop(api_base_uri, token):\n \"\"\"\n Main loop of the recsystem application.\n\n Connects to the event stream websocket and starts a loop to receive and\n handle events from the backend.\n \"\"\"\n\n log.info(f'initializing websocket connection to event stream')\n uri = urljoin('ws:' + splittype(api_base_uri)[1], 'event_stream')\n headers = {'Authorization': 'Bearer ' + token}\n async with websockets.connect(uri, extra_headers=headers) as websocket:\n log.info(f'listening to websocket for events...')\n # Incoming RPC requests are added to this queue, and their results are\n # popped off the queue and sent; the queue is used as a means of\n # serializing responses, otherwise we could have multiple coroutines\n # concurrently trying to write to the same websocket\n queue = asyncio.Queue()\n\n # Start the incoming and outgoing message handlers; a slight variant of\n # this pattern:\n # https://websockets.readthedocs.io/en/stable/intro.html#both\n await multiplex_tasks(handle_incoming(websocket, queue),\n handle_outgoing(websocket, queue))\n\n\nasync def multiplex_tasks(*tasks):\n \"\"\"\n Run multiple coroutines simultaneously as tasks, exiting as soon as any one\n of them raises an 
exception.\n\n The exception from the coroutine is then re-raised.\n \"\"\"\n\n done, pending = await asyncio.wait(tasks,\n return_when=asyncio.FIRST_EXCEPTION)\n\n try:\n for task in done:\n # If one of the tasks exited with an exception\n # Calling .result() re-raises that exception\n task.result()\n finally:\n for task in pending:\n task.cancel()\n\n\nasync def dispatch_incoming(queue, request):\n \"\"\"\n Dispatch incoming messages to the JSON-RPC method dispatcher.\n\n When the result is ready it is placed on the outgoing queue.\n \"\"\"\n\n response = await dispatch(request)\n log.info(format_rpc_call(request, response))\n await queue.put(response)\n\n\nasync def handle_incoming(websocket, queue):\n \"\"\"\n This coroutine checks the websocket for incoming JSON-RPC requests and\n passes them to `dispatch_incoming`.\n \"\"\"\n\n while True:\n request = await websocket.recv()\n asyncio.ensure_future(dispatch_incoming(queue, request))\n\n\nasync def handle_outgoing(websocket, queue):\n \"\"\"\n This coroutine checks the outgoing response queue for results from\n dispatched RPC methods, and sends them on the websocket.\n \"\"\"\n\n while True:\n response = await queue.get()\n if response.wanted:\n await websocket.send(str(response))\n\n\nclass ArticleCollection:\n \"\"\"Maintain a list of articles sorted by article_id (ascending).\"\"\"\n\n def __init__(self, initial=None, max_size=MAX_ARTICLES):\n self.article_ids = []\n self.articles = {}\n self.max_size = max_size\n if initial:\n for item in initial:\n id_ = item['article_id']\n if id_ not in self.articles:\n self.article_ids.append(id_)\n self.articles[id_] = item\n\n self.article_ids = sorted(self.article_ids)\n # Limit to the max_size highest article IDs\n self.article_ids = self.article_ids[-max_size:]\n\n def __len__(self):\n return len(self.article_ids)\n\n def __getitem__(self, article_id):\n \"\"\"\n Retrieve items from the collection by article_id or a range of\n article_ids.\n \"\"\"\n\n if not 
isinstance(article_id, slice):\n # The single article case is simple.\n try:\n return self.article_ids[article_id]\n except KeyError:\n raise IndexError(article_id)\n\n # Select ranges of article IDs--this can be tricky because although\n # self.article_ids is assumed to be sorted, it have missing items in\n # the range\n slc = article_id\n start = slc.start\n stop = slc.stop\n\n if start is not None:\n idx = bisect.bisect_left(self.article_ids, start)\n if idx == len(self.article_ids):\n start = None\n else:\n start = idx\n\n if stop is not None:\n # reverse enumerate\n stop = bisect.bisect_left(self.article_ids, stop)\n\n ids = self.article_ids[start:stop:slc.step]\n\n return [self.articles[id_] for id_ in ids]\n\n def push(self, item):\n \"\"\"\n Push a new article to the collection while maintaining the sort\n invariant.\n\n If the new article is already than the lowest article ID and the\n collection is already at capacity, it is discarded.\n \"\"\"\n\n id_ = item['article_id']\n if (id_ in self.articles or\n (len(self.article_ids) == self.max_size and\n id_ < self.article_ids[0])):\n return\n\n bisect.insort_left(self.article_ids, id_)\n self.articles[id_] = item\n\n if len(self.article_ids) > self.max_size:\n old_id = self.article_ids.pop(0)\n del self.articles[old_id]\n\n self.articles[id_] = item\n\n log.debug(f'new article added to the collection: {item}')\n log.debug(f'article collection size: {len(self)}')\n\n\ndef format_rpc_call(request, response=None):\n \"\"\"\n For debugging purposes, print parsed JSON-RPC requests/responses.\n \"\"\"\n\n if isinstance(request, str):\n request = json.loads(request)\n\n if isinstance(response, DictResponse):\n response = response.deserialized()\n else:\n response = None\n\n method = request['method']\n params = request.get('params', {})\n if isinstance(params, list):\n params = ', '.join(repr(v) for v in params)\n else:\n params = ', '.join(f'{k}={v!r}' for k, v in params.items())\n call = f'{method}({params})'\n\n 
if response is None:\n return call\n\n if 'error' in response:\n return f'{call} !! {response[\"error\"]!r}'\n else:\n return f'{call} -> {response[\"result\"]!r}'\n\n\nclass FileOrToken(click.File):\n \"\"\"\n Extends `click.File` to also accept a JWT token.\n\n If the input value resembles a properly formatted JWT token its value will\n be taken as-is wrapped in an `io.StringIO`. Otherwise the input is assumed\n to be a filename and the file is returned as an open file object.\n \"\"\"\n\n def convert(self, value, param, ctx):\n try:\n jwt.decode(value, verify=False)\n except jwt.DecodeError:\n return super().convert(value, param, ctx)\n\n return io.StringIO(value)\n\n\n@click.command()\n@click.option('-a', '--api-base-uri', default=RENEWAL_API_BASE_URI,\n help='URI for the Renewal HTTP API')\n@click.option('-t', '--token', required=True, type=FileOrToken(),\n help='authentication token for the recsystem; if a valid '\n 'filename is given the token is read from a file instead')\n@click.option('--log-level', default='INFO',\n type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR'],\n case_sensitive=False),\n help='minimum log level to output')\ndef main(api_base_uri, token, log_level):\n logging.basicConfig(level=log_level)\n log.setLevel(log_level)\n coloredlogs.install(level=log_level, logger=log)\n\n if api_base_uri[-1] != '/':\n # Add trailing slash to make it easier to join URL fragments with\n # urljoin()\n api_base_uri += '/'\n\n log.info(f'starting up {RECSYS_NAME} recsystem on {api_base_uri}')\n token = token.read().strip()\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(initialize(api_base_uri, token))\n while True:\n try:\n loop.run_until_complete(request_loop(api_base_uri, token))\n except (websockets.WebSocketException, ConnectionRefusedError):\n log.warning(\n 'lost connection to the backend; trying to re-establish...')\n time.sleep(5)\n except KeyboardInterrupt:\n return\n finally:\n # Cancel all pending tasks\n for task in 
asyncio.Task.all_tasks(loop=loop):\n task.cancel()\n try:\n # Give the task a chance to finish up\n loop.run_until_complete(task)\n except Exception:\n # This may result in a CancelledError or other miscellaneous\n # exceptions as connections are shut down, but we are exiting\n # anyways so ignore them.\n pass\n\n loop.run_until_complete(loop.shutdown_asyncgens())\n loop.close()\n\n\nif __name__ == '__main__':\n main(auto_envvar_prefix=ENVVAR_PREFIX)\n","sub_path":"recsystems/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":11559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"452518773","text":"import csv\nfrom django.db import IntegrityError\nfrom tot.utils.ingredient_fetch import fetchIngredients\nfrom tot.models import Drink\n\n\n# Parsing csv file to db models\ndef fetchCsv(file):\n\n #line separator\n separator='|'\n\n # new drinks to add\n newDrinks = []\n\n # new ingredients\n newIngredints = []\n\n file.open()\n file_data = file.read().decode(\"utf-8\")\n lines = file_data.split(\"\\n\")\n\n for line in lines[1:-1]:\n parameters = line.split(separator)\n print(parameters)\n name = parameters[0].strip()\n desc = parameters[1].strip()\n alc = int(float(parameters[2].strip()))\n times = int(float(parameters[3].strip()))\n url = parameters[4].strip()\n ingredients = []\n rest = parameters[5:]\n print(\"REST:\", rest)\n for ing in rest:\n if not ing == '':\n ingredients.append(ing)\n print(\"----------------\")\n print(\"NAME:\", name)\n print(\"DESC:\", desc)\n print(\"ALC:\", alc)\n print(\"TIMES\", times)\n print(\"URL:\", url)\n print(\"INGREDIENTS:\", ingredients)\n\n # saving\n try:\n drink = Drink.objects.create(alcohol_level=alc, drink_name=name, description=desc, timies_drank=times,\n image=url)\n drink.save()\n\n # parsing to form accepted by fetch\n ing_string = convertToFetch(ingredients)\n print('FETCH:' + ing_string)\n newIng, allIng = fetchIngredients(ing_string, drink)\n newIngredints = newIngredints + newIng\n newDrinks.append(name)\n except (IntegrityError, AttributeError) as e:\n print(e)\n print(name)\n pass\n print(\"added drinks:\")\n print(newDrinks)\n print(\"added ingr:\")\n print(newIngredints)\n\n return newDrinks, newIngredints\n\n\n# converts to form accepted by fetchIngredients function\ndef convertToFetch(ingredients):\n ing_string = ''\n for ing in ingredients:\n ing_replaced = ing.replace(';', ',')\n print(ing_replaced)\n ing_string += ing_replaced\n ing_string += ';'\n return 
ing_string\n","sub_path":"tot/utils/csv_fetch.py","file_name":"csv_fetch.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"43254282","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom matplotlib.animation import FuncAnimation\nfrom copy import copy\n\nclass Branch:\n \"\"\"A simple example class\"\"\"\n def __init__(self, iter):\n self.x = []\n self.y = []\n self.sep = 0.075\n self.decay_rate = 0.01\n self.iter = iter\n\n def reproduce(self):\n return Branch(self.iter)\n\n def degrees_to_rads(self, degs):\n rads = (np.pi/180*degs)\n return rads\n\n def compute_trajectory(self):\n for i in range(iter):\n direction = random.uniform(self.degrees_to_rads(0),\n self.degrees_to_rads(360))\n if i == 0:\n self.x.append(random.randint(-1, 1))\n self.y.append(random.randint(-1, 1))\n elif i == 10:\n self.x.append(self.x[i-1] + self.sep*np.cos(direction))\n self.y.append(self.y[i-1] + self.sep*np.sin(direction))\n else:\n self.x.append(self.x[i-1] + self.sep*np.cos(direction))\n self.y.append(self.y[i-1] + self.sep*np.sin(direction))\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(1, 1, 1) # specify (nrows, ncols, axnum)\nax2 = ax1.twinx() # specify (nrows, ncols, axnum)\nax1.set_xlim(-3, 3)\nax1.set_ylim(-3, 3)\nimage_1, = ax1.plot([], [], 'o', color='k')\nimage_2, = ax2.plot([], [], 'o', color='k')\nmarkersize = 3\niter = 10000\nk = 10\n\nseed = Branch(iter)\nseed.compute_trajectory()\nnew_branch = Branch(iter)\n\ndef animate(i):\n image_1.set_data(seed.x[:i+1], seed.y[:i+1])\n image_1.set_markersize(markersize)\n image_2.set_data(new_branch.x[:i+1], new_branch.y[:i+1])\n image_2.set_markersize(markersize)\n\nani = FuncAnimation(fig, animate, frames=iter, interval=1)\nplt.show()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"529892693","text":"# Hint built-in modules: datetime, time\n# 1\n# Sa se defineasca un decorator care:\n# Printeaza ora la care a inceput executia functiei\n# Printeaza ora la care functia s-a finalizat\n# Printeaza cate secunde a rulat functia\n\n# 2 Sa se defineasca un decorator care creaza un log file (un fisier txt cu 'jurnalul' de activitate) pentru fiecare functie decorata care sa contina:\n# Ora la care a inceput/sfarsit\n# Numele functiei\n# In fisier sa avem cate 5 entries pentru fiecare apel de functie (sa avem cate 5 rulari separate in jurnal)\n\n# Sa se aplice decoratoarele(pe rand, si dupa impreuna) pe urmatoarele functii:\n# Functie care printeaza itereaza in 1 milion, si printeaza fiecare numar\n# Functie care creaza 10 fisiere txt separate si scrie de la 1 la 1 milion(linii separate fiecare numar) in fiecare fisier\n# Functie care printeaza 5 cuvinte, dar fiecare cuvant este printat odata la 5 secunde\n\n# Sa incercam sa apelam fiecare functie separat, nu toate odata, considerand ca fiecare functie poate dura destul de mult.\n\nfrom time import time, ctime, sleep\n\ndef detalii_functie(funct):\n def wrapper(*args):\n t1 = time()\n rezultat=funct(*args)\n t2 = time()\n print(f\"Functia a inceput sa ruleze la {ctime(t1)} si s-a finalizat la {ctime(t2)}\")\n timp_rulare=round((t2-t1),4)\n print(f\"Timpul de rulare a fost de {timp_rulare} secunde\")\n return rezultat\n return wrapper\n\ndef log_file(funct):\n def wrapper(*args):\n numar_linii = 0\n t1 = time()\n rezultat=funct(*args)\n t2 = time()\n name=funct.__name__\n file = open(f\"fisier functie {name}.txt\", \"a\")\n file.write(f\"Functia {name} a inceput sa ruleze la {ctime(t1)} si s-a finalizat la {ctime(t2)}.\\nTimpul de rulare a fost de {round((t2-t1),4)} secunde\\n\")\n file.close()\n try:\n file = open(f\"fisier functie {name}.txt\", \"r\")\n for line in file.readlines():\n numar_linii +=1\n file.close()\n except:\n numar_linii = 3\n file = open(f\"fisier functie {name}.txt\", 
\"a\")\n file.write(f\"Final Executia {(numar_linii//4)+1}\\n\"+\"*\"*70+\"\\n\")\n file.close()\n return rezultat\n return wrapper\n\n# @detalii_functie\n@log_file\ndef million():\n for i in range (1000001):\n print (i)\nmillion()\n\n# @detalii_functie\n@log_file\ndef creare_fisiere():\n for i in range(10):\n file = open(f\"fisier.{i+1}.txt\",\"w\")\n for i in range(1,1000001):\n file.write(f\"{i}\\n\")\n file.close()\ncreare_fisiere()\n\n# @detalii_functie\n@log_file\ndef pause_function(*cuvinte):\n for i in cuvinte:\n sleep(5)\n print(i)\npause_function(\"Acesta\",\"nu\",\"este\",\"un\",\"melc\")\n","sub_path":"Tema21/Tema_21.py","file_name":"Tema_21.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"194541971","text":"# -*- coding: UTF-8 -*-\n'''\nAuthor: chenxing\nDate: 2018-04-30\n'''\n\nimport numpy as np\nimport pickle\nimport jieba\nimport time\nimport wave\nfrom pyaudio import PyAudio, paInt16\nfrom aip import AipSpeech\n\nstop_word = [',', '。', '、', '!', '?', ',', '.', '!', '?', ' ', '', '\\n', '(', ')', '(', ')', '\\ufeff']\n'''\n 停用词集, 包含“啊,吗,嗯”一类的无实意词汇以及标点符号\n'''\n\n'''\n 载入数据\n'''\n\n\ndef loadStopword():\n fr = open('stopword.txt', 'r', encoding=('utf-8'))\n lines = fr.readlines()\n for line in lines:\n stop_word.append(line.strip())\n fr.close()\n\n\n'''\n 创建词集\n params:\n documentSet 为训练文档集\n return:词集, 作为词袋空间\n'''\n\n\ndef createVocabList(documentSet):\n vocabSet = set([])\n for document in documentSet:\n vocabSet = vocabSet | set(document) # union of the two sets\n return list(vocabSet)\n\n\n'''\n 文本处理,如果是未处理文本,则先分词(jieba分词),再去除停用词\n'''\n\n\ndef textParse(bigString): # input is big string, #output is word list\n cutted = jieba.cut(bigString, cut_all=False)\n listOfWord = []\n for word in cutted:\n if word not in stop_word:\n listOfWord.append(word)\n return listOfWord\n\n\n'''\n 交叉训练\n'''\nBAD = 1\nGOOD = 0\n\n\ndef testClassify():\n listAllDoc = []\n listClasses = []\n\n print(\"----loading document list----\")\n\n # 31个标注为差评的文档\n for i in range(1, 32):\n wordList = textParse(open('bad/%d.txt' % i, 'r', encoding=('utf-8')).read())\n listAllDoc.append(wordList)\n listClasses.append(BAD)\n # 31个标注为好评的文档\n for i in range(1, 32):\n wordList = textParse(open('good/%d.txt' % i, 'r', encoding=('utf-8')).read())\n listAllDoc.append(wordList)\n listClasses.append(GOOD)\n\n print(\"----creating vocab list----\")\n # 构建词袋模型\n listVocab = createVocabList(listAllDoc)\n docNum = len(listAllDoc)\n # testSetNum = int(docNum * 0.1)\n testSetNum = 10\n\n trainingIndexSet = list(range(docNum)) # 建立与所有文档等长的空数据集(索引)\n testSet = [] # 空测试集\n\n # 随机索引,用作测试集, 同时将随机的索引从训练集中剔除\n for i in range(testSetNum):\n randIndex = int(np.random.uniform(0, 
len(trainingIndexSet)))\n testSet.append(trainingIndexSet[randIndex])\n del (trainingIndexSet[randIndex])\n\n trainMatrix = []\n trainClasses = []\n\n for docIndex in trainingIndexSet:\n trainMatrix.append(bagOfWords2VecMN(listVocab, listAllDoc[docIndex]))\n trainClasses.append(listClasses[docIndex])\n\n print(\"----traning begin----\")\n pBAD, pGOODV, pCLASS = trainNaiveBayes(np.array(trainMatrix), np.array(trainClasses))\n\n print(\"----traning complete----\")\n print(\"pBAD:\", pBAD)\n print(\"pGOODV:\", pGOODV)\n print(\"pCLASS:\", pCLASS)\n print(\"bad: %d, good:%d\" % (BAD, GOOD))\n\n args = dict()\n args['pBAD'] = pBAD\n args['pGOODV'] = pGOODV\n args['pCLASS'] = pCLASS\n\n fw = open(\"args.pkl\", \"wb\")\n pickle.dump(args, fw, 2)\n fw.close()\n\n fw = open(\"vocab.pkl\", \"wb\")\n pickle.dump(listVocab, fw, 2)\n fw.close()\n\n errorCount = 0\n for docIndex in testSet:\n vecWord = bagOfWords2VecMN(listVocab, listAllDoc[docIndex])\n if classifyNaiveBayes(np.array(vecWord), pBAD, pGOODV, pCLASS) != listClasses[docIndex]:\n errorCount += 1\n doc = ' '.join(listAllDoc[docIndex])\n print(\"classfication error\", doc)\n print('the error rate is: ', float(errorCount) / len(testSet))\n\n\n# 分类方法(这边只做二类处理)\ndef classifyNaiveBayes(vec2Classify, pBADVec, pGOODVec, pClass1):\n pIsBAD = sum(vec2Classify * pBADVec) + np.log(pClass1) # element-wise mult\n pIsGOOD = sum(vec2Classify * pGOODVec) + np.log(1.0 - pClass1)\n\n if pIsBAD > pIsGOOD:\n return BAD\n else:\n return GOOD\n\n\n'''\n 训练\n params:\n tranMatrix 由测试文档转化成的词空间向量 所组成的 测试矩阵\n tranClasses 上述测试文档对应的分类标签\n'''\n\n\ndef trainNaiveBayes(trainMatrix, trainClasses):\n numTrainDocs = len(trainMatrix)\n numWords = len(trainMatrix[0]) # 计算矩阵列数, 等于每个向量的维数\n numIsBAD = len([x for x in trainClasses if x == BAD])\n pCLASS = numIsBAD / float(numTrainDocs)\n pBADNum = np.ones(numWords)\n pGOODNum = np.ones(numWords)\n pBADDemon = 2.0\n pGOODDemon = 2.0\n\n for i in range(numTrainDocs):\n if trainClasses[i] == BAD:\n pBADNum 
+= trainMatrix[i]\n pBADDemon += sum(trainMatrix[i])\n else:\n pGOODNum += trainMatrix[i]\n pGOODDemon += sum(trainMatrix[i])\n\n pBADVect = np.log(pBADNum / pBADDemon)\n pGOODVect = np.log(pGOODNum / pGOODDemon)\n\n return pBADVect, pGOODVect, pCLASS\n\n\n'''\n 将输入转化为向量,其所在空间维度为 len(listVocab)\n params: \n listVocab-词集\n inputSet-分词后的文本,存储于set\n'''\n\n\ndef bagOfWords2VecMN(listVocab, inputSet):\n returnVec = [0] * len(listVocab)\n for word in inputSet:\n if word in listVocab:\n returnVec[listVocab.index(word)] += 1\n return returnVec\n\n\n'''\n 读取保存的模型,做分类操作\n'''\n\n\ndef Classify(textList):\n fr = open(\"args.pkl\", \"rb\")\n args = pickle.load(fr)\n pBAD = args['pBAD']\n pGOODV = args['pGOODV']\n pCLASS = args['pCLASS']\n fr.close()\n\n fr = open(\"vocab.pkl\", \"rb\")\n listVocab = pickle.load(fr)\n fr.close()\n\n if len(listVocab) == 0:\n print(\"got no args\")\n return\n\n text = textParse(textList)\n vecWord = bagOfWords2VecMN(listVocab, text)\n class_type = classifyNaiveBayes(np.array(vecWord), pBAD, pGOODV, pCLASS)\n if class_type == 1:\n print(\"classfication type:差评\")\n return BAD\n else:\n print(\"classfication type:好评\")\n return GOOD\n\n\n'''\n 存储音频\n'''\nframerate = 8000 # 采样频率\nNUM_SAMPLES = 2000\nchannels = 1 # 声道\nsampwidth = 2 # 采样字节\nTIME = 1 # 时间\n\n\ndef save_wave_file(filename, data):\n wf = wave.open(filename, 'wb')\n wf.setnchannels(channels)\n wf.setsampwidth(sampwidth)\n wf.setframerate(framerate)\n wf.writeframes(b\"\".join(data))\n wf.close()\n\n\ndef my_record():\n pa = PyAudio()\n stream = pa.open(format=paInt16, channels=1,\n rate=framerate, input=True,\n frames_per_buffer=NUM_SAMPLES)\n my_buf = []\n count = 0\n while count < TIME * 10: # 控制录音时间\n string_audio_data = stream.read(NUM_SAMPLES)\n my_buf.append(string_audio_data)\n count += 1\n print(count, '秒')\n save_wave_file('01.wav', my_buf)\n stream.close()\n\n\nif __name__ == \"__main__\":\n loadStopword()\n goodCount = 0\n badCount = 0\n # 定义常量,此处替换为你自己的应用信息\n APP_ID = 
'11177120'\n API_KEY = 'lGIefOgI5IuELBPUYziS4APL'\n SECRET_KEY = 'csbojnHuFzZPL5ZfXxd76EZed01T3b2j'\n while True:\n opcode = input(\"input 1 for training, 2 for Crawler text test, 3 for Audio test, Others for text test: \")\n if opcode.strip() == \"1\":\n begtime = time.time()\n testClassify()\n print(\"cost time total:\", time.time() - begtime)\n elif opcode.strip() == \"2\":\n textList = open('taobao.txt', 'r', encoding=('utf-8')).readlines()\n print(len(textList))\n for text in textList:\n if Classify(text) == BAD:\n badCount += 1\n else:\n goodCount += 1\n print(goodCount)\n print(badCount)\n print(\"好评率:\", goodCount / (goodCount + badCount))\n goodCount = 0\n badCount = 0\n elif opcode.strip() == '3':\n my_record()\n # 初始化AipSpeech对象\n aipSpeech = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n print('----录音已完成----')\n print('----开始语音识别----')\n result = aipSpeech.asr(open('01.wav', 'rb').read(), 'wav', 8000, {\n 'dev_pid': '1536',\n })\n\n if result['err_msg'] != 'success.':\n print('未获得语音')\n else:\n print('----语音识别已完成----')\n print(result['result'][0])\n text = result['result'][0]\n Classify(text)\n else:\n text = input(\"input the text:\")\n Classify(text)\n","sub_path":"NaiveBayes/NavieBayes.py","file_name":"NavieBayes.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"592592385","text":"'''\nCreated on 13 Aug 2015\n\n@author: NoNotCar\n'''\nimport pygame\n# import pygame._view\nimport sys\nimport UniJoy\n\nscreen = pygame.display.set_mode((400, 256))\n\nimport Img\nfrom Enemies import *\n\n\ndef die(screen):\n pygame.mixer.music.stop()\n pygame.display.flip()\n pygame.time.wait(1000)\n screen.fill((0, 0, 0))\n Img.bcentre(Img.bfont, \"FOOL\", screen, col=(255, 255, 255))\n pygame.display.flip()\n pygame.time.wait(1000)\n\n\ndef Instruct(instructions, time):\n words = instructions.split()\n text = \"\"\n for i in range(len(words)):\n pygame.event.pump()\n if i:\n text += \" \"\n text += words[i]\n screen.fill((255, 255, 255))\n Img.bcentre(Img.dfont, text, screen, col=(0, 0, 0))\n pygame.display.flip()\n pygame.time.wait(time)\n\n\nclass Player(object):\n def __init__(self):\n self.radius = 100\n self.angle = 0.0\n self.direction = 1\n self.speedmult = 1\n self.lasedown = 0\n\n def get_x(self):\n return int(round(self.radius * math.sin(math.radians(self.angle)))) + 128\n\n def get_y(self):\n return int(round(self.radius * math.cos(math.radians(self.angle)))) + 128\n\n def get_speed(self):\n return self.radius ** -1 * 100 * self.speedmult\n\n\nlevels = (([Asteroid], 15, 1), ([Asteroid, BigAsteroid], 20, 1.5), ([Hostage, Asteroid], 30, 1),\n ([BigAsteroid, SmallAsteroid], 30, 2), ([MustShoot], 30, 1),\n ([Asteroid, Obstacle], 30, 1), ([Obstacle2], 30, 1), ([EnemyShip], 30, 1), ([Ranged], 30, 1),\n ([Obstacle, MustShoot], 30, 1.5), ([EnemyShip2], 60, 2))\nlevel = 0\njnum = pygame.joystick.get_count()\nunijs = [UniJoy.Unijoy(n) for n in range(jnum)]\nassert jnum>0,\"NOT ENOUGH CONTROLLERS\"\nInstruct(\"UP/DOWN TO MOVE\", 500)\npygame.time.wait(500)\nInstruct(\"SHOOT WITH A\", 500)\nwhile True:\n p = Player()\n c = pygame.time.Clock()\n obstacles = []\n plasers = []\n score = 0\n tick = 0\n dead = False\n if level == 4:\n Instruct(\"MUST SHOOT YELLOW\", 500)\n elif level == 5:\n Instruct(\"RED IS IMMORTAL\", 500)\n elif 
level == 2:\n Instruct(\"DON'T SHOOT PINK\", 500)\n elif level == len(levels) - 1:\n Instruct(\"ULTIMATE DEFENCE\", 1000)\n p.speedmult = levels[level][2]\n if level != 9:\n Instruct(\"LEVEL \" + str(level + 1), 500)\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n screen.fill((255, 255, 255) if level != len(levels) - 1 else (255, 150, 0))\n for obsc in levels[level][0]:\n obsc.generate(obstacles)\n ujdir = 0\n for uj in unijs:\n ujd = uj.getdirstick(1)\n if ujd:\n ujdir -= ujd[1]\n if ujdir < -(jnum // 2):\n if p.radius > 30:\n p.radius -= 1\n elif ujdir > jnum // 2:\n if p.radius < 100:\n p.radius += 1\n if any([uj.get_b(\"A\") for uj in unijs]) and not p.lasedown:\n plasers.append([p.get_x() - 8, p.get_y() - 2])\n p.lasedown = 20\n if p.lasedown > 0:\n p.lasedown -= 1\n pygame.draw.circle(screen, (127, 127, 127), (128, 128), p.radius, 1)\n orects = []\n plrects = []\n for obstacle in obstacles:\n orects.append((pygame.draw.rect(screen, obstacle.col,\n pygame.Rect(obstacle.x, obstacle.y, obstacle.w, obstacle.h)), obstacle))\n obstacle.update(obstacles)\n for pos in plasers:\n plrects.append(pygame.draw.rect(screen, (0, 0, 255), pygame.Rect(pos[0], pos[1], 16, 4)))\n pos[0] += 4\n prect = pygame.draw.rect(screen, (0, 0, 0), pygame.Rect(p.get_x() - 8, p.get_y() - 8, 16, 16))\n for ore in [o for o in orects if o[1].plaser]:\n for pr in plrects:\n if ore[0].colliderect(pr):\n obstacles.remove(ore[1])\n if ore[1].hostage:\n die(screen)\n dead = True\n for ore in orects:\n if ore[1].isdeadly and ore[0].colliderect(prect):\n die(screen)\n dead = True\n for obstacle in obstacles:\n if obstacle.x <= -obstacle.w:\n if not obstacle.deadgooff:\n obstacles.remove(obstacle)\n else:\n die(screen)\n dead = True\n if dead:\n break\n for ore in orects:\n if not ore[1].isdeadly and ore[0].colliderect(prect):\n obstacles.remove(ore[1])\n for pos in plasers:\n if pos[0] > 400:\n plasers.remove(pos)\n p.angle = (p.angle - p.get_speed()) % 
360\n pygame.display.flip()\n c.tick(60)\n if tick == 60:\n score += 1\n tick = 0\n if score == levels[level][1]:\n pygame.mixer.music.stop()\n Instruct(\"WELL DONE\", 500)\n level += 1\n if level == 10:\n Instruct(\"YOU WIN!\", 2000)\n sys.exit()\n break\n else:\n tick += 1\n","sub_path":"Orbital/multi.py","file_name":"multi.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"431978485","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport random\nfrom App.api.utils.FileCommon import FileCommon\nimport App.api.utils.RedisCommon as RedisCommon\nimport App.api.utils.Constants as Constants\nclass TestCommon():\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\n\t@classmethod\n\tdef randomStart(cls, token, typ, difficulty, length):\n\t\tli = cls.genQuestionList(typ, length)\n\t\t\"\"\"\n\t\t保存li内容到redis\n\t\t\"\"\"\n\t\tRedisCommon.saveQuestion(token, li)\n\t\treturn FileCommon.gen(li[0], difficulty)\n\n\t@classmethod\n\tdef nextQuestion(cls, answer, g):\n\t\ttoken = g.token\n\t\tdifficulty = g.difficulty\n\t\t\"\"\"\n\t\t从redis中取出余下问题链接和答案结果链接\n\t\t\"\"\"\n\t\tquestionList = RedisCommon.getQuestion(token)\n\t\tcorrect = RedisCommon.getCorrect(token)\n\t\tcheckList = RedisCommon.getCheck(token)\n\n\t\tresult = cls.check(questionList[len(checkList)], answer)\n\t\tif result:\n\t\t\tcorrect += 1\n\t\t\t\"\"\"\n\t\t\t\t保存分数,答对题数\n\t\t\t\"\"\"\n\t\t\tRedisCommon.saveCorrect(token, correct)\n\n\t\t\"\"\"\n\t\t\t答题内容\n\t\t\"\"\"\n\t\tRedisCommon.saveCheck(token, answer)\n\t\tprint('%d-%d' %(len(checkList), len(questionList)))\n\t\tif len(checkList) == len(questionList)-1:\n\t\t\treturn Constants.COMPLETE\n\t\treturn FileCommon.gen(questionList[len(checkList)+1], difficulty)\n\n\n\t@classmethod\n\tdef getResult(cls, g):\n\t\ttoken = g.token\n\t\tdifficulty = g.difficulty\n\t\t\"\"\"\n\t\t\t从redis中取出所有问题链接和答案结果链接与评分\n\t\t\"\"\"\n\t\tquestionList = RedisCommon.getQuestion(token)\n\t\tcorrect = RedisCommon.getCorrect(token)\n\t\tcheckList = RedisCommon.getCheck(token)\n\n\t\tRedisCommon.saveRanking(token, difficulty)\n\t\tif len(questionList) == 50:\n\t\t\tsaveRank(token, difficulty)\n\t\tretList = []\n\t\tfor i in range(len(questionList)):\n\t\t\tretList.append({\n\t\t\t\t\"q\": questionList[i].decode('utf8'),\n\t\t\t\t\"a\": checkList[i].decode('utf8')\n\t\t\t})\n\t\treturn {\n\t\t\t\"data\": 
retList\n\t\t}\n\n\t@classmethod\n\tdef check(cls, question, answer):\n\t\treturn question == answer\n\n\n\t@classmethod\n\tdef genQuestionList(cls, typ, length):\n\t\treturn random.sample(FileCommon.combine(typ), length)\n\n\n\t@classmethod\n\tdef savePlayer(cls, token, name):\n\t\tRedisCommon.savePlayer(token, name)\n\n\t@classmethod\n\tdef getRank(cls, difficulty):\n\t\treturn RedisCommon.getRanking(difficulty)\n\n\t@classmethod\n\tdef saveRank(cls, token, difficulty):\n\t\tRedisCommon.saveRank(token, difficulty)\n\n\n\n","sub_path":"App/api/utils/TestCommon.py","file_name":"TestCommon.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"44753006","text":"\"\"\"\n================\nPrecision-Recall\n================\n\nExample of Precision-Recall metric to evaluate classifier output quality.\n\nIn information retrieval, precision is a measure of result relevancy, while\nrecall is a measure of how many truly relevant results are returned. A high\narea under the curve represents both high recall and high precision, where high\nprecision relates to a low false positive rate, and high recall relates to a\nlow false negative rate. High scores for both show that the classifier is\nreturning accurate results (high precision), as well as returning a majority of\nall positive results (high recall).\n\nA system with high recall but low precision returns many results, but most of\nits predicted labels are incorrect when compared to the training labels. A\nsystem with high precision but low recall is just the opposite, returning very\nfew results, but most of its predicted labels are correct when compared to the\ntraining labels. An ideal system with high precision and high recall will\nreturn many results, with all results labeled correctly.\n\nPrecision (:math:`P`) is defined as the number of true positives (:math:`T_p`)\nover the number of true positives plus the number of false positives\n(:math:`F_p`).\n\n:math:`P = \\\\frac{T_p}{T_p+F_p}`\n\nRecall (:math:`R`) is defined as the number of true positives (:math:`T_p`)\nover the number of true positives plus the number of false negatives\n(:math:`F_n`).\n\n:math:`R = \\\\frac{T_p}{T_p + F_n}`\n\nThese quantities are also related to the (:math:`F_1`) score, which is defined\nas the harmonic mean of precision and recall.\n\n:math:`F1 = 2\\\\frac{P \\\\times R}{P+R}`\n\nIt is important to note that the precision may not decrease with recall. The\ndefinition of precision (:math:`\\\\frac{T_p}{T_p + F_p}`) shows that lowering\nthe threshold of a classifier may increase the denominator, by increasing the\nnumber of results returned. 
If the threshold was previously set too high, the\nnew results may all be true positives, which will increase precision. If the\nprevious threshold was about right or too low, further lowering the threshold\nwill introduce false positives, decreasing precision.\n\nRecall is defined as :math:`\\\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does\nnot depend on the classifier threshold. This means that lowering the classifier\nthreshold may increase recall, by increasing the number of true positive\nresults. It is also possible that lowering the threshold may leave recall\nunchanged, while the precision fluctuates.\n\nThe relationship between recall and precision can be observed in the\nstairstep area of the plot - at the edges of these steps a small change\nin the threshold considerably reduces precision, with only a minor gain in\nrecall. See the corner at recall = .59, precision = .8 for an example of this\nphenomenon.\n\n.. note::\n\n See also :func:`sklearn.metrics.average_precision_score`,\n :func:`sklearn.metrics.recall_score`,\n :func:`sklearn.metrics.precision_score`\n\"\"\"\nprint(__doc__)\n\nimport random\nimport pylab as pl\nimport numpy as np\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import auc\nfrom sklearn.cross_validation import train_test_split\n\n# import some data to play with\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\nX, y = X[y != 2], y[y != 2] # Keep also 2 classes (0 and 1)\n\n# Add noisy features\nrandom_state = np.random.RandomState(0)\nn_samples, n_features = X.shape\nX = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# Split into training and test\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=random_state)\n\n# Run classifier\nclassifier = svm.SVC(kernel='linear', probability=True,\n random_state=random_state)\nprobas_ = classifier.fit(X_train, y_train).predict_proba(X_test)\n\n# Compute 
Precision-Recall and plot curve\nprecision, recall, thresholds = precision_recall_curve(y_test, probas_[:, 1])\narea = auc(recall, precision)\nprint(\"Area Under Curve: %0.2f\" % area)\n\npl.clf()\npl.plot(recall, precision, label='Precision-Recall curve')\npl.xlabel('Recall')\npl.ylabel('Precision')\npl.ylim([0.0, 1.05])\npl.xlim([0.0, 1.0])\npl.title('Precision-Recall example: AUC=%0.2f' % area)\npl.legend(loc=\"lower left\")\npl.show()\n","sub_path":"examples/plot_precision_recall.py","file_name":"plot_precision_recall.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"302434240","text":"#!/usr/bin/env python3\n\nimport argparse\nimport codecs\nimport http.client\nimport http.server\nimport json\nimport logging\nimport re\nimport sys\nimport time\nimport csv\nimport os\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport webbrowser\nfrom datetime import timedelta\n\nlogging.basicConfig(level=20, datefmt=\"%I:%M:%S\", format=\"[%(asctime)s] %(message)s\")\n\n\nclass SpotifyAPI:\n # Requires an OAuth token.\n def __init__(self, auth):\n self._auth = auth\n\n # Gets a resource from the Spotify API and returns the object.\n def get(self, url, params={}, tries=3):\n # Construct the correct URL.\n if not url.startswith(\"https://api.spotify.com/v1/\"):\n url = \"https://api.spotify.com/v1/\" + url\n if params:\n url += (\"&\" if \"?\" in url else \"?\") + urllib.parse.urlencode(params)\n\n # Try the sending off the request a specified number of times before giving up.\n for _ in range(tries):\n try:\n req = urllib.request.Request(url)\n req.add_header(\"Authorization\", \"Bearer \" + self._auth)\n res = urllib.request.urlopen(req)\n reader = codecs.getreader(\"utf-8\")\n return json.load(reader(res))\n except Exception as err:\n logging.info(\"Couldn't load URL: {} ({})\".format(url, err))\n time.sleep(2)\n logging.info(\"Trying again...\")\n sys.exit(1)\n\n # fetches liked, playlists, podcast episodes and albums then joins them\n def list(self, url, params={}):\n response = self.get(url, params)\n items = response[\"items\"]\n\n # loop through to bring all tracks and their data\n while response[\"next\"]:\n logging.info(f\"Loaded {len(items)}/{response['total']} items\")\n\n response = self.get(response[\"next\"])\n items += response[\"items\"]\n\n return items\n\n # fetches followed artists and joins them\n def list_artists(self, url, params={}):\n response = self.get(url, params)\n items = response['artists'][\"items\"]\n\n # loop through to bring all tracks and their data\n while 
response['artists'][\"next\"]:\n logging.info(f\"Loaded {len(items)}/{response['artists']['total']} items\")\n\n response = self.get(response['artists'][\"next\"])\n items += response['artists'][\"items\"]\n\n return items\n\n # Pops open a browser window for a user to log in and authorize API access.\n @staticmethod\n def authorize(client_id, scope):\n url = \"https://accounts.spotify.com/authorize?\" + urllib.parse.urlencode(\n {\n \"response_type\": \"token\",\n \"client_id\": client_id,\n \"scope\": scope,\n \"redirect_uri\": f\"http://127.0.0.1:{SpotifyAPI._SERVER_PORT}/redirect\",\n }\n )\n logging.info(f\"Authorizing... (click if browser doesn't open)\\n{url}\\n\")\n webbrowser.open(url)\n\n # Start a simple, local HTTP server to listen for the authorization token... (i.e. a hack).\n server = SpotifyAPI._AuthorizationServer(\"127.0.0.1\", SpotifyAPI._SERVER_PORT)\n try:\n while True:\n server.handle_request()\n except SpotifyAPI._Authorization as auth:\n return SpotifyAPI(auth.access_token)\n\n # The port that the local server listens on. 
Don't change this,\n # as Spotify only will redirect to certain predefined URLs.\n _SERVER_PORT = 43019\n\n class _AuthorizationServer(http.server.HTTPServer):\n def __init__(self, host, port):\n http.server.HTTPServer.__init__(\n self, (host, port), SpotifyAPI._AuthorizationHandler\n )\n\n # Disable the default error handling.\n def handle_error(self, request, client_address):\n raise\n\n class _AuthorizationHandler(http.server.BaseHTTPRequestHandler):\n def do_GET(self):\n # The Spotify API has redirected here, but access_token is hidden in the URL fragment.\n # Read it using JavaScript and send it to /token as an actual query string...\n if self.path.startswith(\"/redirect\"):\n self.send_response(200)\n self.send_header(\"Content-Type\", \"text/html\")\n self.end_headers()\n self.wfile.write(\n b''\n )\n\n # Read access_token and use an exception to kill the server listening...\n elif self.path.startswith(\"/token?\"):\n self.send_response(200)\n self.send_header(\"Content-Type\", \"text/html\")\n self.end_headers()\n self.wfile.write(\n b\"Thanks! 
You may now close this window.\"\n )\n\n access_token = re.search(\"access_token=([^&]*)\", self.path).group(1)\n logging.info(\"Received access token from Spotify\")\n raise SpotifyAPI._Authorization(access_token)\n\n else:\n self.send_error(404)\n\n # Disable the default logging.\n def log_message(self, format, *args):\n pass\n\n class _Authorization(Exception):\n def __init__(self, access_token):\n self.access_token = access_token\n\n# simple recursive y/n input with default\ndef yesno(question, default=None):\n ans = input(question).strip().lower()\n\n if default is not None:\n if ans == '':\n if default == 'y':\n return True\n return False\n elif ans not in ['y', 'n']:\n print(f'{ans} is invalid, please try again...')\n return yesno(question)\n if ans == 'y':\n return True\n else:\n if ans not in ['y', 'n']:\n print(f'{ans} is invalid, please try again...')\n return yesno(question)\n if ans == 'y':\n return True\n\n return False\n\n# return formatted hh mm ss\ndef timematter(x):\n s = timedelta(seconds=x)\n\n if s.days < 1:\n if s.seconds <= 60 * 60:\n out = f'{s.seconds//60}m {s.seconds - (s.seconds//60)*60}s'\n else:\n out = f'{s.seconds//(60*60)}h {int(s.seconds/60 - (s.seconds//3600)*60)}m {s.seconds - (s.seconds//60)*60}s'\n else:\n out = f'{s.days}d {s.seconds//(60*60)}h {int(s.seconds/60 - (s.seconds//3600)*60)}m {s.seconds - (s.seconds//60)*60}s'\n return out\n\n# save tracks to csv\ndef save_track(filename, track_list):\n file = open(filename, 'w')\n\n # init sheet rows\n fieldnames = [\n 'Track ID',\n 'Album ID',\n 'Track Name',\n 'Album Name',\n 'Artist Name(s)',\n 'Release Date',\n 'Duration (ms)',\n 'Explicity',\n 'Album Type',\n 'Popularity',\n 'Added On',\n 'Album Tracks',\n 'Track URL',\n 'Album URL',\n ]\n\n # init csv writer\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n\n # loop through tracks and add them as rows\n for track in track_list:\n try:\n writer.writerow({\n 'Track ID': track['track']['id'],\n 
'Album ID': track['track']['album']['id'],\n 'Track Name': track['track']['name'],\n 'Album Name': track['track']['album']['name'],\n 'Album Tracks': track['track']['album']['total_tracks'],\n 'Artist Name(s)': \", \".join([artist['name'] for artist in track['track']['artists']]),\n 'Release Date': track['track']['album']['release_date'],\n 'Duration (ms)': timematter(int(track['track']['duration_ms']) / 1000),\n 'Explicity': track['track']['explicit'],\n 'Album Type': track['track']['album']['album_type'],\n 'Popularity': track['track']['popularity'],\n 'Added On': track['added_at'],\n 'Track URL': track['track']['external_urls']['spotify'],\n 'Album URL': track['track']['album']['external_urls']['spotify']\n })\n except KeyError:\n logging.error(f\"Failed to load track {track['track']['name']}\")\n continue\n\n file.close()\n\n# save artists to csv\ndef save_artist(filename, artist_list):\n file = open(filename, 'w')\n\n # init sheet rows\n fieldnames = [\n 'ID',\n 'Name',\n 'Type',\n 'Followers',\n 'Popularity',\n 'URL'\n ]\n\n # init csv writer\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n\n # loop through artists and add them as rows\n for artist in artist_list:\n try:\n writer.writerow({\n 'ID': artist['id'],\n 'Name': artist['name'],\n 'Type': artist['type'],\n 'Followers': artist['followers']['total'],\n 'Popularity': artist['popularity'],\n 'URL': artist['external_urls']['spotify'],\n })\n except KeyError:\n logging.error(f\"Failed to load artist {artist['name']}\")\n continue\n\n file.close()\n\n# save albums to csv\ndef save_album(filename, album_list):\n file = open(filename, 'w')\n\n # init sheet rows\n fieldnames = [\n 'ID',\n 'Name',\n 'Tracks',\n 'Artist Name(s)',\n 'Release Date',\n 'Label',\n 'Type',\n 'Popularity',\n 'Added On',\n 'URL',\n ]\n\n # init csv writer\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n\n # loop through albums and add them as rows\n for album in 
album_list:\n try:\n writer.writerow({\n 'ID': album['album']['id'],\n 'Name': album['album']['name'],\n 'Tracks': album['album']['total_tracks'],\n 'Artist Name(s)': \", \".join([album['name'] for album in album['album']['artists']]),\n 'Release Date': album['album']['release_date'],\n 'Label': album['album']['label'],\n 'Type': album['album']['album_type'],\n 'Popularity': album['album']['popularity'],\n 'Added On': album['added_at'],\n 'URL': album['album']['external_urls']['spotify']\n })\n except KeyError:\n logging.error(f\"Failed to load album {album['album']['name']}\")\n continue\n\n file.close()\n\n# save podcasts to csv\ndef save_podcast(filename, podcast_list):\n file = open(filename, 'w')\n\n # init sheet rows\n fieldnames = [\n 'ID',\n 'Name',\n 'Publisher',\n 'Description',\n 'Episodes',\n 'Type',\n 'Explicity',\n 'Added On',\n 'URL',\n ]\n\n # init csv writer\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n\n # loop through podcases and add them as rows\n for podcast in podcast_list:\n try:\n writer.writerow({\n 'ID': podcast['show']['id'],\n 'Name': podcast['show']['name'],\n 'Description': podcast['show']['description'],\n 'Episodes': podcast['show']['total_episodes'],\n 'Publisher': podcast['show']['publisher'],\n 'Type': podcast['show']['media_type'],\n 'Explicity': podcast['show']['explicit'],\n 'Added On': podcast['added_at'],\n 'URL': podcast['show']['external_urls']['spotify']\n })\n except KeyError:\n logging.error(f\"Failed to load podcast {podcast['show']['name']}\")\n continue\n\n file.close()\n\n# save episodes to csv\ndef save_episode(filename, episode_list):\n file = open(filename, 'w')\n\n # init sheet rows\n fieldnames = [\n 'Episode ID',\n 'Show ID',\n 'Episode Name',\n 'Show Name',\n 'Publisher',\n 'Description',\n 'Release Date',\n 'Duration (ms)',\n 'Explicity',\n 'Show Type',\n 'Added On',\n 'Episode URL',\n 'Show URL',\n ]\n\n # init csv writer\n writer = csv.DictWriter(file, 
fieldnames=fieldnames)\n writer.writeheader()\n\n # loop through episodes and add them as rows\n for episode in episode_list:\n try:\n writer.writerow({\n 'Episode ID': episode['episode']['id'],\n 'Show ID': episode['episode']['show']['id'],\n 'Episode Name': episode['episode']['name'],\n 'Show Name': episode['episode']['show']['name'],\n 'Publisher': episode['episode']['show']['publisher'],\n 'Description': episode['episode']['description'],\n 'Release Date': episode['episode']['release_date'],\n 'Duration (ms)': timematter(int(episode['episode']['duration_ms']) / 1000),\n 'Explicity': episode['episode']['explicit'],\n 'Show Type': episode['episode']['show']['media_type'],\n 'Added On': episode['added_at'],\n 'Episode URL': episode['episode']['external_urls']['spotify'],\n 'Show URL': episode['episode']['show']['external_urls']['spotify']\n })\n except KeyError:\n logging.error(f\"Failed to load episode {episode['episode']['name']}\")\n continue\n\n file.close()\n\n\n# log into the Spotify API.\nspotify = SpotifyAPI.authorize(\n # id from spotify client app created at\n # https://developer.spotify.com/dashboard/applications\n # it has http://127.0.0.1:43019/redirect as the redirect URI\n client_id=\"fc84b0b659d64f568f72d0d6009ad965\",\n scope=\"playlist-read-private playlist-read-collaborative user-library-read user-follow-read\",\n)\n\n\n# get the ID of the logged in user.\nlogging.info('Loading user info...')\nme = spotify.get('me')\nlogging.info(f\"Logged in as {me['display_name']} ({me['id']})\")\n\n\n# for playlists not owned by user\nsave_foreign_playlists = yesno('Save playlists not owned by you? 
[y/N]: ', 'n')\n\n\n# create needed dirs\nlogging.info('Creating needed directories')\nos.makedirs('./done/Music/Playlists', exist_ok=True)\nos.makedirs('./done/Podcasts', exist_ok=True)\n\n\n# save liked songs\nlogging.info('Loading liked songs...')\nliked_tracks = spotify.list(f\"users/{me['id']}/tracks\", {'limit': 50})\nlogging.info('Saving liked songs')\nsave_track('done/Music/Liked.csv', liked_tracks)\n\n\n# get all playlist data\nplaylist_data = spotify.list(f\"users/{me['id']}/playlists\", {'limit': 50})\n\n\n# get user's playlist data\nlogging.info(\"Loading user's playlists...\")\nuser_playlists = [playlist for playlist in playlist_data if playlist['owner']['id'] == me['id']]\nlogging.info(f\"Found {len(user_playlists)} user's playlists\")\n\n# saving user's playlist songs\nfor playlist in user_playlists:\n logging.info(f\"Loading user playlist: {playlist['name']} ({playlist['tracks']['total']} songs)\")\n playlist_tracks = spotify.list(playlist['tracks']['href'], {'limit': 100})\n logging.info(f\"Saving {playlist['name']}'s songs\")\n save_track(f\"done/Music/Playlists/{playlist['name']} - {playlist['id']}.csv\", playlist_tracks)\n\n\n# check whether to save foreign playlists\nif save_foreign_playlists:\n # get foreign playlist data\n logging.info(\"Loading foreign playlists...\")\n foreign_playlists = [playlist for playlist in playlist_data if playlist['owner']['id'] != me['id']]\n logging.info(f\"Found {len(foreign_playlists)} foreign playlists\")\n\n # saving foreign playlist songs\n for playlist in foreign_playlists:\n logging.info(f\"Loading foreign playlist: {playlist['name']} ({playlist['tracks']['total']} songs)\")\n playlist_tracks = spotify.list(playlist['tracks']['href'], {'limit': 100})\n logging.info(f\"Saving {playlist['name']}'s songs\")\n save_track(f\"done/Music/Playlists/{playlist['name']} - {playlist['id']}.csv\", playlist_tracks)\n\n\n# following artists data\nlogging.info('Loading followed artists...')\nfollowing_artist_data = 
spotify.list_artists('me/following', {'type': 'artist', 'limit': 50})\nlogging.info(f'Found {len(following_artist_data)} artists')\nsave_artist('done/Music/Artists.csv', following_artist_data)\n\n\n# saved album data\nlogging.info('Loading saved albums...')\nsaved_album_data = spotify.list('me/albums', {'limit': 50})\nlogging.info(f'Found {len(saved_album_data)} albums')\nsave_album('done/Music/Albums.csv', saved_album_data)\n\n\n# saved podcast shows data\nlogging.info('Loading saved podcast shows...')\nsaved_podcast_data = spotify.list('me/shows', {'limit': 50})\nlogging.info(f'Found {len(saved_podcast_data)} podcasts')\nsave_podcast('done/Podcasts/Shows.csv', saved_podcast_data)\n\n\n# saved podcast episode data\nlogging.info('Loading saved podcast episodes...')\nsaved_episode_data = spotify.list(\"me/episodes\", {'limit': 50})\nlogging.info('Saving episodes')\nsave_episode('done/Podcasts/Episodes.csv', saved_episode_data)\n","sub_path":"spotify-backup.py","file_name":"spotify-backup.py","file_ext":"py","file_size_in_byte":16945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"477378935","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\nclass Projectile(object):\n # Class for the information about the object and movement tracking\n\n def __init__(self, y, length, acc, mass, rad, Cd, Rho, liquidLevel, RhoAir, multiLiquids, pipeLength):\n # SO MANY VARIABLES\n self.y = y\n self.yinit = y\n self.yprev = self.y\n self.length = length\n self.vel = 0\n self.acc = acc\n self.top = y + length\n self.mass = mass\n self.rad = rad\n self.Cd = Cd # Drag Coefficient\n self.Rho = Rho # Fluid Density\n self.RhoAir = RhoAir # Air Density\n\n # Fill pipe based on if we want object to fall through air or not\n if(not multiLiquids):\n self.liquidLevel = 2 * pipeLength\n else:\n self.liquidLevel = liquidLevel\n\n # data storage\n self.data = []\n self.realData = []\n self.velData = []\n\n # Accurate volume calculations\n self.cone = math.pi * self.rad**2 * self.length / 3.0\n self.sphere = 2.0 / 3.0 * math.pi * self.rad**3\n self.volume = self.cone + self.sphere\n self.density = self.mass / self.volume\n # Print density of object so we know it doesnt float\n print(\"Density of object: \" + str(self.density))\n\n # Update position to simulate movement\n def update(self, dt, time):\n self.Forces(dt)\n self.Position(dt, time)\n\n def checkBlocking(self, gates):\n for i in range(len(gates)):\n if self.y < gates[i].y1 and self.top > gates[i].y2:\n self.data.append([time, gates[i].y1])\n\n def Forces(self, dt):\n #Fb = (4 / 3.0) * math.pi * self.rad**3 * self.Rho * 9.81\n if(self.y < self.liquidLevel):\n Fb = self.volume * self.Rho * 9.81\n Fd = 0.5 * self.Cd * self.Rho * (math.pi * self.rad**2) * (self.vel**2)\n else:\n #Fb = (4 / 3.0) * math.pi * self.rad**3 * self.RhoAir * 9.81\n Fb = 0\n Fd = 0.5 * self.Cd * self.RhoAir * (math.pi * self.rad**2) * (self.vel**2)\n\n Mg = self.mass * 9.81\n self.acc = (-Mg + Fd + Fb) / self.mass\n #self.acc = (-Mg + Fd) / self.mass\n\n\n def Position(self, dt, time):\n self.y = 
self.yprev + self.vel * dt + (0.5) * (self.acc) * (dt**2)\n self.top = self.y + self.length\n self.vel = (self.y - self.yprev) / dt\n self.yprev = self.y\n self.realData.append([time, self.y])\n self.velData.append([time, self.vel])\n\n# gate object, didnt have to make it but made it easier to think about in my head\nclass Gate(object):\n # Class for the transistors\n def __init__(self, y1, length):\n self.y1 = y1\n self.y2 = y1 + length\n\n# Pipe constants\npipe = 10.0 # total length of pipe\nnumSensors = int(pipe / .12) # Number of sensors being used\nstarty = pipe + .1\n\n\n#---------------------#\n# Simulate #\n#---------------------#\n\n# Create projectile object\nproj = Projectile(starty, # ystart(where it starts the object)\n .06, # length\n -9.81, # Gravity\n 0.150, # Mass(kg)\n .03, # radius(m)\n .09, # Cd(Coefficient of drag for shape of object)\n 1000, # RhoLiquid(Density of liquid at bottom of pipe)\n pipe/2, # liquidLevel(Where in pipe liquid starts)\n 1.225, # RhoAir(Density of liquid at top of pipe)\n True, # Whether or not there are multiple liquids\n pipe) # Height of pipe\n\n#termvel = math.sqrt((2 * proj.mass * 9.81) / (proj.Rho * math.pi * proj.rad**2 * proj.Cd))\n#print(str(termvel))\n\n# Pre-run info\ngates = [] # List of gate objects\ndt = .0001 # Time step length\ntime = 0 # Current Time\nhitTerminal = False\ntermvelTime = 0\n\n# Make the gates\nfor i in range(numSensors):\n gates.append(Gate((pipe * i) / numSensors + .12, .001))\n\n# update function for the while loop of the simulationn\ndef update(time):\n proj.update(dt, time)\n proj.checkBlocking(gates)\n\nrunning = True\n\nif(proj.density <= proj.Rho):\n sys.exit(\"Projectile Floats!!\")\n\n#Euler updating loop\nwhile(running):\n update(time)\n if(proj.top < 0):\n running = False\n # Store time for termvel vertical line in graphs\n if(proj.acc > -0.005 and hitTerminal == False):\n hitTerminal = True\n termvelTime = time\n time += 
dt\n\n#print(proj.acc)\n#print(termvelTime)\n#-----------------------#\n# Plotting #\n#-----------------------#\n\n# Simulated data gathering of X and Y\nx = []\ny = []\n\n# Actual X and Y calculated by simulation\nrealx = []\nrealy = []\n\n# Instantaneous Velocitites\nvelx = []\nvely = []\n\nfor i in range(len(proj.data)):\n x.append(proj.data[i][0])\n y.append(proj.data[i][1])\n\nfor i in range(len(proj.realData)):\n realx.append(proj.realData[i][0])\n realy.append(proj.realData[i][1])\n\nfor i in range(len(proj.velData)):\n velx.append(proj.velData[i][0])\n vely.append(proj.velData[i][1])\n\n# Figure one\n#plt.figure(1)\nplt.subplot(211)\nplt.title(str(proj.mass) + \"kg mass, \" + str(proj.rad) + \"m radius\")\nplt.scatter(realx, realy, color=\"r\")\n#plt.scatter(x, y)\nplt.ylabel(\"pos(m)\")\nplt.xlabel(\"time(s)\")\n\nplt.subplot(212)\nplt.scatter(velx, vely, color = 'g')\nplt.axvline(x = termvelTime, ymin = 0, ymax = 1, color = 'k')\nplt.ylabel(\"vel(m/s)\")\nplt.xlabel(\"time(s)\")\nplt.show()\n\n# Saving to a file\n'''\nfilename = str(proj.mass * 1000).split('.', 1)\nplt.savefig(filename[0] + 'gBothDrop.png')\nplt.show()\n'''\n","sub_path":"capstone/simData.py","file_name":"simData.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"286246841","text":"\"\"\"\r\n771. 宝石与石头\r\n\r\n给定字符串J 代表石头中宝石的类型,和字符串 S代表你拥有的石头。 S 中每个字符代表了一种你拥有的石头的类型,你想知道你拥有的石��中有多少是宝石。\r\n\r\nJ 中的字母不重复,J 和 S中的所有字符都是字母。字母区分大小写,因此\"a\"和\"A\"是不同类型的石头。\r\n\r\n示例 1:\r\n\r\n输入: J = \"aA\", S = \"aAAbbbb\"\r\n输出: 3\r\n示例 2:\r\n\r\n输入: J = \"z\", S = \"ZZ\"\r\n输出: 0\r\n注意:\r\n\r\nS 和 J 最多含有50个字母。\r\n J 中的字符不重复。\r\n\r\n来源:力扣(LeetCode)\r\n链接:https://leetcode-cn.com/problems/jewels-and-stones\r\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n def numJewelsInStones(self, J: str, S: str) -> int:\r\n J = dict(zip(J, [0]*len(J)))\r\n jewels = 0\r\n for stone in S:\r\n if stone in J:\r\n jewels += 1\r\n return jewels\r\n\r\n\r\nif __name__ == '__main__':\r\n result = Solution().numJewelsInStones('aA', 'aAAbbbb')\r\n print(result)\r\n","sub_path":"t771.py","file_name":"t771.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"244123651","text":"n = int(input())\narr = []\nfor _ in range(n):\n arr.append(list(map(int,input().split())))\n\nlen = len(arr)\nrank = [1] * len\n\nfor i in range(len):\n for x in range(len):\n if i == x:\n continue\n if arr[i][0] < arr[x][0] and arr[i][1] < arr[x][1]:\n rank[i]+=1\n\nfor i in rank:\n print(i,end=' ')","sub_path":"VS 2019/BOJ_Python/단계/11. 브루트 포스/BOJ_7568.py","file_name":"BOJ_7568.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"250412365","text":"\r\n\r\ndef run():\r\n #Inputs the reference string\r\n print(\"Enter the reference string: \", end=\"\")\r\n refList = list(map(int, input().strip().split()))\r\n\r\n #Inputs the capacity or size of the page frame\r\n print(\"Enter the number of frames: \", end=\"\")\r\n capacity = int(input())\r\n \r\n #Initiates the page frame to be given \r\n page_frame = [] \r\n #Initiates the page fault. set defaults to 0\r\n pageFaults = 0\r\n #This one discovers what is recently used during comparison\r\n most_recently_used = None\r\n\r\n #Loops through the entire reference list to check with the page_frame\r\n for i in refList:\r\n #Checks if the element is in the page frame\r\n if i not in page_frame:\r\n #If the page frame is empty or less than the capacity\r\n if len(page_frame) < capacity:\r\n page_frame.append(i)\r\n #If it's not empty, then replace the one with the most recently used \r\n else:\r\n #Finds the matching page inside the page_frame and replaces it\r\n index = page_frame.index(most_recently_used)\r\n page_frame[index] = i\r\n #Increase the page fault since the page is not in the page frame\r\n pageFaults +=1\r\n #Sets the current checked frame as the one as the most recently used.\r\n most_recently_used = i\r\n \r\n #Prints the page frame\r\n print(\"Total Page Faults: {}\".format(pageFaults))\r\n","sub_path":"Page Fault Algorithm/PAGE_FAULT_CONTENT/MRU.py","file_name":"MRU.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"234196072","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom .models import Post\nfrom .forms import PostForm\nfrom django.views.generic import ListView\n\n# Create your views here.\n\n\n\n@login_required(login_url = 'login')\ndef function(request):\n post = Post.objects.all()\n context = {\n 'posts': post\n }\n return render(request,'home.html', context)\n\ndef fun2(request):\n return render(request,'about.html')\n\ndef createPost(request):\n form = PostForm(initial = {'author' : request.user})\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n form.save()\n form = PostForm()\n\n context = {'form': form}\n\n return render (request,'createPost.html', context)\n\ndef updatePost(request, _id):\n postid = Post.objects.get(id=_id)\n form = PostForm(instance = postid)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=postid)\n if form.is_valid():\n form.save()\n return redirect('home')\n\n context = {'form': form}\n\n return render (request,'createPost.html', context)\n\ndef myPost(request,_id):\n user = User.objects.get(id=_id)\n post = user.post_set.all()\n context = {'user' : user, 'post': post}\n return render(request,'mypost.html',context)\n\ndef userPost(request,_id):\n # user = User.objects.get(Post.author)\n post = User.post_set.all(Post.author)\n context = { 'post': post}\n return render(request,'userPost.html',context)\n\nclass UserPostListView(ListView):\n model = Post\n template_name = 'userPost.html' # /_.html\n context_object_name = 'posts'\n\n def get_queryset(self):\n user = get_object_or_404(User, username=self.kwargs.get('username'))\n return Post.objects.filter(author=user)\n\n\ndef delete(request,_id):\n post = Post.objects.get(id=_id)\n if request.method == \"POST\":\n # form = PostForm(request.POST, instance=postid)\n post.delete()\n return 
redirect('home')\n\n context = {'post' :post}\n\n return render(request,'delete.html', context)\n\n\n\n\n","sub_path":"blog/blogapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"297086166","text":"from os.path import join, dirname, sep, splitext\nfrom path import Path\nfrom glob import glob\nimport pygame\n\nclass Character_assets:\n def __init__(self, path_str):\n self.assets = dict()\n\n # Encontra os assets dentro das pastas\n path = path_str.split(\"/\")\n modules = Path(dirname(__file__)).glob(join(*path, \"**\", \"**\", \"*\"))\n\n for path in modules:\n # Separa arquivo das as duas ultimas pastas parentes do caminho\n *directory, dtype, key, filename = path.split(sep)\n\n # Separa nome e formato do arquivo\n file_split = splitext(filename)\n name, extension = file_split\n\n # Registra pasta do tipo de asset\n if not dtype in self.assets:\n self.assets[dtype] = {}\n\n # Registra o nome do asset\n if not key in self.assets[dtype]:\n self.assets[dtype][key] = []\n\n # Lida com arquivos de imagem\n if extension in (\".png\", \".jpg\"):\n asset = pygame.image.load(path).convert_alpha()\n\n # Lida com arquivos de som\n elif extension in (\".ogg\", \".wav\", \".mp3\"):\n asset = pygame.mixer.Sound(path)\n\n # Registra o asset ou variação do mesmo no escopo do programa\n self.assets[dtype][key].append(asset)\n\n\n def get_assets(self):\n return self.assets\n","sub_path":"src/Character_assets.py","file_name":"Character_assets.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"314454565","text":"#!/usr/bin/env python \n# encoding: utf-8 \n\n\"\"\"\n测试 TfUtils 的函数功能\n\n@version: v1.0 \n@author: Jeffrey Wang\n@license: Apache Licence \n@contact: shwangjj@163.com\n@file: Test_TfUtils.py \n@time: 2018/5/6 0006 下午 15:53 \n\"\"\"\nimport unittest\nfrom tools.TfUtils import TfUtils\nimport os\n\nclass TestTfUtils(unittest.TestCase):\n\n dirname = os.path.dirname(__file__)\n\n exp_text_filepath = dirname + \"//数据Excel文件_测试数据文件.txt\"\n\n def setUp(self):\n # 删除测试过程中可能生成的文件\n if os.path.exists(self.exp_text_filepath):\n os.remove(self.exp_text_filepath)\n\n def test_exportExcelToTextFile(self):\n # 测试将excel文件中内容,导出成 测试用数据文件\n excel_file = self.dirname + \"//数据Excel文件.xlsx\"\n text_file = self.exp_text_filepath\n\n r = TfUtils.exportExcelToTextFile(excel_file, text_file, col_list=\"A,C,D\", txt_title_type=TfUtils.TITLE_SAME_AS_ABC)\n self.assertTrue(r)\n # 检查,生成的文件存在\n self.assertTrue(os.path.exists(text_file))\n # 检查,生成的文件有标题行\n c_file = open(self.exp_text_filepath, \"r\", encoding=\"UTF-8\")\n titleLine = c_file.readline()\n self.assertEqual(\"A|C|D\", titleLine)\n c_file.close()\n\n\n def test_exportExcelToTextFile_源文件不存在的情况(self):\n # 测试,源文件不存在的情况\n excel_file = self.dirname + \"//xxxx.xlsx\"\n text_file = self.exp_text_filepath\n\n r = TfUtils.exportExcelToTextFile(excel_file, text_file, col_list=\"A,C,D\")\n self.assertFalse(r)\n\n def test_exportExcelToTextFile_源文件中sheet不存在的情况(self):\n # 测试,sheet不存在的情况\n excel_file = self.dirname + \"//数据Excel文件.xlsx\"\n text_file = self.exp_text_filepath\n\n r = TfUtils.exportExcelToTextFile(excel_file, text_file, sheet_name=\"xxxx\", col_list=\"A,C,D\")\n self.assertFalse(r)\n\n def test_exportExcelToTextFile_源文件中列名不存在的情况(self):\n # 测试,列不存在的情况\n excel_file = self.dirname + \"//数据Excel文件.xlsx\"\n text_file = self.exp_text_filepath\n\n r = TfUtils.exportExcelToTextFile(excel_file, text_file, sheet_name=\"xxxx\", col_list=\"A,C,D,777\")\n self.assertFalse(r)\n\n def test__getExcelColIndex(self):\n 
# 测试,根据ABC,���得excel列号的函数\n self.assertEqual([0,1,2,4,25], TfUtils._getExcelColIndex(\"A,B,C,E,Z\"))\n self.assertEqual([0,26], TfUtils._getExcelColIndex(\"A,AA\") )\n self.assertEqual([26], TfUtils._getExcelColIndex(\"AA\"))\n self.assertEqual([1, 27], TfUtils._getExcelColIndex(\"B,AB\"))\n # 有非法字符\n self.assertEqual([1, 27], TfUtils._getExcelColIndex(\"B, AB \"))\n self.assertEqual([1, 27], TfUtils._getExcelColIndex(\"B, AB0\"))","sub_path":"src_testcase/test_tools/Test_TfUtils.py","file_name":"Test_TfUtils.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"577715683","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as sp\nimport math\nimport random as rm\nimport NumerosGenerados as ng\nimport scipy.interpolate as si\nfrom Testsdiscretos import testPoisson\n\nmu = 6.3 \npoisson = sp.poisson(mu) \nxLine = np.arange(poisson.ppf(0.01),\n poisson.ppf(0.99))\nfmp = poisson.pmf(xLine)\nplt.plot(xLine, fmp, '--',color = \"red\")\nplt.vlines(xLine, 0, fmp, colors='b', lw=5, alpha=0.5,ec='black')\nplt.title('Distribución Poisson')\nplt.ylabel('probabilidad')\nplt.xlabel('valores')\nplt.show()\nprint(\"Media: \", round(np.mean(xLine),3))\nprint(\"Desvio: \", round(np.sqrt(np.var(xLine)),3))\nprint(\"Varianza: \", round(np.var(xLine),3))\n\n#----------Naylor----------\ncant = 10000\nrandomGCL = ng.generarNumeros(cant)\npoissons = []\n\ndef funPoisson(lamda):\n for i in range (cant):\n x = 0\n b = np.exp(-lamda)\n tr = 1\n r = rm.uniform(1,0)\n tr = tr * r\n while((tr-b)>=0):\n x = x + 1\n r = rm.uniform(1,0)\n tr = tr * r\n poissons.append(x)\n unicos, cuenta = np.unique(poissons, return_counts=True)\n frec = np.array(cuenta/cant)\n print(\"Media: \", round(np.mean(poissons),3))\n print(\"Desvio: \", round(np.sqrt(np.var(poissons)),3))\n print(\"Varianza: \", round(np.var(poissons),3))\n plt.title(\"Distribucion de Poisson\")\n print(unicos,cuenta)\n xnew = np.linspace(unicos.min(), unicos.max(), 300) \n spl = si.make_interp_spline(unicos, frec, k=3)\n frec_suavizada = spl(xnew)\n\n plt.plot(xLine, fmp, '--',color = \"violet\")\n plt.vlines(xLine, 0, fmp, colors='black', lw=5, alpha=0.5)\n\n plt.plot(xnew, frec_suavizada, '--', color = \"brown\")\n plt.bar(unicos, frec, width=0.2, alpha = 0.7)\n plt.show()\n\nfunPoisson(mu)\n\npoissonTeorica = 
np.random.poisson(mu,cant)\n\ntestPoisson(poissons,poissonTeorica)\n","sub_path":"TP2_2/Poisson.py","file_name":"Poisson.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"586434045","text":"c, n = list(map(int, input().split()))\npf = []\nnf = []\nfor _ in range(n):\n fl = int(input())\n if fl > 0:\n pf.append(fl)\n elif fl < 0:\n nf.append(fl)\n\nspf = sorted(pf, reverse=True)\nsnf = [abs(x) for x in sorted(nf)]\nmoves = 0\n\nfloors = [spf, snf]\nif len(spf) == 0 and len(snf) == 0:\n pass\nelif len(spf) == 0:\n moves -= snf[0]\nelif len(snf) == 0:\n moves -= spf[0]\nelse:\n moves -= max(spf[0], snf[0])\n \n\nfor i, l in enumerate(floors):\n while len(l) > 0:\n moves += l[0] * 2\n if c < len(l):\n l = l[c:]\n else:\n l = []\n\nseconds = moves * 20\nminutes = 0\nhours = 0\nif seconds >= 60:\n minutes = seconds // 60\n seconds = seconds % 60\nif minutes >= 60:\n hours = minutes // 60\n minutes = minutes % 60\n\nif hours % 24 < 3 or hours % 24 > 14:\n tod = 'AM'\nelse:\n tod = 'PM'\n\nhour = (9 + hours) % 12\nif hour == 0:\n hour = 12\nif hour < 10:\n hour = '0' + str(hour)\nif minutes < 10:\n minutes = '0' + str(minutes)\nif seconds < 10:\n seconds = '0' + str(seconds)\n\ntime = str(hour) + ':' + str(minutes) + ':' + str(seconds) + ' ' + tod\nprint(time)\n","sub_path":"elevator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"596314068","text":"import random\nimport string\n\n\nVOWELS = 'aeiouy'\nCONSONANTS = 'bcdfghjklmnpqrstvwxz'\n\n\ndef gen_word(minlength = 4, maxlength = 20):\n result = '' + random.choice(random.choice([CONSONANTS, VOWELS]))\n\n if(result[0] in VOWELS):\n odd_letters = VOWELS\n even_letters = CONSONANTS\n else:\n odd_letters = CONSONANTS\n even_letters = VOWELS\n\n for i in range(random.randint(minlength - 1, maxlength - 1)):\n if i % 2 == 1:\n result += random.choice(odd_letters)\n elif i % 2 == 0:\n result += random.choice(even_letters)\n\n return result\n","sub_path":"src/randstring.py","file_name":"randstring.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"505642989","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef replace_zeros(data: pd.DataFrame, cont_vars: list) -> pd.DataFrame:\n df = data.copy()\n for var in cont_vars:\n mean = df[var].mean()\n df.loc[df[var] == 0.0, var] = mean\n return df\n\n\ndef log_transform(data: pd.DataFrame, cont_vars: list) -> pd.DataFrame:\n X = data.copy()\n for var in cont_vars:\n if var not in [\"age\"]:\n X[var] = np.log(X[var])\n return X\n\n\ndef feature_scaling(data: pd.DataFrame, vars: list):\n df = data.copy()\n # fit scaler\n scaler = MinMaxScaler() # create an instance\n scaler.fit(df[vars])\n df = pd.concat([df['response'].reset_index(drop=True),\n pd.DataFrame(scaler.transform(data[vars]),\n columns=vars)], axis=1)\n\n return df\n","sub_path":"feature_eng.py","file_name":"feature_eng.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"605846836","text":"#!/usr/bin/env python3\n\nimport sys\nimport os.path\n\nfrom functools import reduce\nfrom collections import namedtuple, Counter\n\nAddress = namedtuple(\"Address\", [\"name\", \"v4\", \"v6\", \"lineno\"])\nService = namedtuple(\"Service\", [\"name\", \"tcp\", \"udp\", \"lineno\"])\nInterface = namedtuple(\"Interface\", [\"name\", \"zone\", \"protocols\", \"lineno\"])\nRule = namedtuple(\"Rule\", [\"srczone\", \"dstzone\", \"srcaddr\", \"dstaddr\", \"service\", \"action\", \"lineno\"])\nVirtual = namedtuple(\"Virtual\", [\"srczone\", \"extaddr\", \"intaddr\", \"extservice\", \"intservice\", \"lineno\"])\n\nETC_DIR = \"/etc/microfw\"\n\nif len(sys.argv) > 1:\n ETC_DIR = sys.argv[1]\n\n\ndef read_table(filename):\n columns = {\n \"addresses\": 3,\n \"services\": 3,\n \"interfaces\": 3,\n \"rules\": 6,\n \"virtual\": 5\n }\n\n types = {\n \"addresses\": Address,\n \"services\": Service,\n \"interfaces\": Interface,\n \"rules\": Rule,\n \"virtual\": Virtual\n }\n\n if filename not in columns:\n raise RuntimeError(\"table %s does not exist\" % filename)\n\n table = open(os.path.join(ETC_DIR, filename), \"r\")\n for lineno, line in enumerate(table, start=1):\n if not line.strip() or line.startswith(\"#\"):\n continue\n col_data = line.split()\n if len(col_data) != columns[filename]:\n raise ValueError(\n \"%s:%d (%s): Expected %d values, found %d\" % (\n filename, lineno, col_data[0],\n columns[filename], len(col_data)\n )\n )\n yield types[filename]( *(col_data + [lineno]) )\n\n\ndef chain_gen(cmd_gen, next_gen):\n # Take the results from the last step, and pipe every result\n # into the next step individually.\n for cmd in cmd_gen:\n yield from next_gen(cmd)\n\n\ndef printf(fmt, obj):\n \"\"\" Format a string using a namedtuple as args. 
\"\"\"\n print(fmt % obj._asdict())\n\n\ndef generate_setup():\n # Parse tables\n\n all_addresses = {\n address.name: address\n for address in read_table(\"addresses\")\n }\n all_services = {\n service.name: service\n for service in read_table(\"services\")\n }\n\n all_interfaces = list(read_table(\"interfaces\"))\n all_zones = set( iface.zone for iface in all_interfaces )\n all_rules = list(read_table(\"rules\"))\n all_virtuals = list(read_table(\"virtual\"))\n\n # Validate interfaces, rules and virtuals\n\n for interface in all_interfaces:\n if interface.zone in (\"FW\", \"ALL\"):\n raise ValueError(\n \"interfaces:%d (%s): Interface zone cannot be ALL or FW\" % (\n interface.lineno, interface.name\n )\n )\n\n for rule in all_rules:\n if rule.action not in (\"accept+nat\", \"accept\", \"reject\", \"drop\"):\n raise ValueError(\n \"rules:%d: Invalid action '%s'\" % (\n rule.lineno, rule.action\n )\n )\n if rule.srczone in (\"FW\", \"ALL\"):\n raise ValueError(\"rules:%d: Source Zone cannot be ALL or FW\" % rule.lineno)\n if rule.srczone not in all_zones:\n raise ValueError(\n \"rules:%d: Source zone '%s' does not exist\" % (\n rule.lineno, rule.dstzone\n )\n )\n if rule.dstzone not in all_zones | {\"FW\", \"ALL\"}:\n raise ValueError(\n \"rules:%d: Destination zone '%s' does not exist\" % (\n rule.lineno, rule.dstzone\n )\n )\n if rule.srcaddr != \"ALL\":\n if rule.srcaddr not in all_addresses:\n raise ValueError(\n \"rules:%d: Source Address '%s' does not exist\" % (\n rule.lineno, rule.srcaddr\n )\n )\n if rule.dstaddr != \"ALL\":\n if rule.dstaddr not in all_addresses:\n raise ValueError(\n \"rules:%d: Destination Address '%s' does not exist\" % (\n rule.lineno, rule.dstaddr\n )\n )\n if rule.service != \"ALL\":\n if rule.service not in all_services:\n raise ValueError(\n \"rules:%d: Service '%s' does not exist\" % (\n rule.lineno, rule.service\n )\n )\n\n for virtual in all_virtuals:\n if virtual.srczone in (\"FW\", \"ALL\"):\n raise ValueError(\n 
\"virtuals:%d: Source zone cannot be ALL or FW\" % virtual.lineno\n )\n if virtual.extaddr == \"ALL\":\n raise ValueError(\"virtuals:%d: External Address cannot be ALL\" % rule.lineno)\n if virtual.extaddr not in all_addresses:\n raise ValueError(\n \"virtuals:%d: External Address '%s' does not exist\" % (\n virtual.lineno, virtual.extaddr\n )\n )\n if virtual.intaddr == \"ALL\":\n raise ValueError(\"virtuals:%d: Internal Address cannot be ALL\" % rule.lineno)\n if virtual.intaddr not in all_addresses:\n raise ValueError(\n \"virtuals:%d: Internal Address '%s' does not exist\" % (\n virtual.lineno, virtual.intaddr\n )\n )\n if \"ALL\" in (virtual.extservice, virtual.intservice):\n if virtual.extservice != virtual.intservice:\n raise ValueError(\n \"virtuals:%d: When setting one service to ALL, the other must also be ALL\" % (\n virtual.lineno\n )\n )\n if virtual.extservice != \"ALL\":\n if virtual.extservice not in all_services:\n raise ValueError(\n \"virtuals:%d: External Service '%s' does not exist\" % (\n virtual.lineno, virtual.extservice\n )\n )\n if virtual.intservice != \"ALL\":\n if virtual.intservice not in all_services:\n raise ValueError(\n \"virtuals:%d: Internal Service '%s' does not exist\" % (\n virtual.lineno, virtual.intservice\n )\n )\n\n # For address and service tables, figure out which entries are actually _used_\n\n used_addresses = set(\n all_addresses[rule.srcaddr] for rule in all_rules if rule.srcaddr != \"ALL\"\n ) | set(\n all_addresses[rule.dstaddr] for rule in all_rules if rule.dstaddr != \"ALL\"\n ) | set(\n all_addresses[virtual.extaddr] for virtual in all_virtuals if virtual.extaddr != \"ALL\"\n ) | set(\n all_addresses[virtual.intaddr] for virtual in all_virtuals if virtual.intaddr != \"ALL\"\n )\n\n used_services = set(\n all_services[rule.service] for rule in all_rules if rule.service != \"ALL\"\n ) | set(\n all_services[virtual.extservice] for virtual in all_virtuals if virtual.extservice != \"ALL\"\n ) | set(\n 
all_services[virtual.intservice] for virtual in all_virtuals if virtual.intservice != \"ALL\"\n )\n\n # Now let's generate a bash script.\n\n print(\"#!/bin/bash\")\n print(\"set -e\")\n print(\"set -u\")\n print(\"\")\n\n # Generate ipsets for the entries we're going to use\n\n for address in sorted(used_addresses, key=lambda x: x.name):\n if address.v4 != '-':\n printf(\"ipset create '%(name)s_v4' hash:net family inet hashsize 1024 maxelem 65536\", address)\n printf(\"ipset add '%(name)s_v4' '%(v4)s'\", address)\n if address.v6 != '-':\n printf(\"ipset create '%(name)s_v6' hash:net family inet6 hashsize 1024 maxelem 65536\", address)\n printf(\"ipset add '%(name)s_v6' '%(v6)s'\", address)\n\n for service in sorted(used_services, key=lambda x: x.name):\n if service.tcp != '-':\n printf(\"ipset create '%(name)s_tcp' bitmap:port range 1-65535\", service)\n printf(\"ipset add '%(name)s_tcp' '%(tcp)s'\", service)\n if service.udp != '-':\n printf(\"ipset create '%(name)s_udp' bitmap:port range 1-65535\", service)\n printf(\"ipset add '%(name)s_udp' '%(udp)s'\", service)\n\n print(\"\")\n\n # Generate implicit accept rules for lo, icmp and related\n\n print(\"iptables -A INPUT -i lo -j ACCEPT\")\n print(\"ip6tables -A INPUT -i lo -j ACCEPT\")\n\n print(\"iptables -A INPUT -p icmp -j ACCEPT\")\n print(\"iptables -A FORWARD -p icmp -j ACCEPT\")\n\n print(\"ip6tables -A INPUT -p icmpv6 -j ACCEPT\")\n print(\"ip6tables -A FORWARD -p icmpv6 -j ACCEPT\")\n\n print(\"iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT\")\n print(\"ip6tables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT\")\n\n print(\"iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT\")\n print(\"ip6tables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT\")\n\n # Generate action chains\n\n print(\"iptables -N accept\")\n print(\"iptables -A accept -j ACCEPT\")\n\n print(\"iptables -N drop\")\n print(\"iptables -A drop -j DROP\")\n\n print(\"iptables -N 
reject\")\n print(\"iptables -A reject -m addrtype --src-type BROADCAST -j DROP\")\n print(\"iptables -A reject -s 224.0.0.0/4 -j DROP\")\n print(\"iptables -A reject -p igmp -j DROP\")\n print(\"iptables -A reject -p tcp -j REJECT --reject-with tcp-reset\")\n print(\"iptables -A reject -p udp -j REJECT --reject-with icmp-port-unreachable\")\n print(\"iptables -A reject -p icmp -j REJECT --reject-with icmp-host-unreachable\")\n print(\"iptables -A reject -j REJECT --reject-with icmp-host-prohibited\")\n\n print(\"ip6tables -N accept\")\n print(\"ip6tables -A accept -j ACCEPT\")\n\n print(\"ip6tables -N drop\")\n print(\"ip6tables -A drop -j DROP\")\n\n print(\"ip6tables -N reject\")\n print(\"ip6tables -A reject -p tcp -j REJECT --reject-with tcp-reset\")\n print(\"ip6tables -A reject -j REJECT --reject-with icmp6-adm-prohibited\")\n\n # Generate zone-specific chains\n\n for zone in sorted(all_zones):\n print(\"iptables -N '%s_inp'\" % zone)\n print(\"ip6tables -N '%s_inp'\" % zone)\n print(\"iptables -N '%s_fwd'\" % zone)\n print(\"ip6tables -N '%s_fwd'\" % zone)\n\n # Generate rules to route traffic from INPUT and FORWARD to those chains\n\n for interface in all_interfaces:\n if interface.protocols != \"-\":\n for proto in interface.protocols.split(\",\"):\n print(\"iptables -A INPUT -i '%s' -p '%s' -j ACCEPT\" % (interface.name, proto))\n print(\"ip6tables -A INPUT -i '%s' -p '%s' -j ACCEPT\" % (interface.name, proto))\n\n # Route incoming traffic to zone-specific input chains\n printf(\"iptables -A INPUT -i '%(name)s' -j '%(zone)s_inp'\", interface)\n printf(\"ip6tables -A INPUT -i '%(name)s' -j '%(zone)s_inp'\", interface)\n\n # We will never allow hairpin traffic though (traffic cannot be\n # forwarded out the same interface where it came in)\n printf(\"iptables -A FORWARD -i '%(name)s' -o '%(name)s' -j drop\", interface)\n printf(\"ip6tables -A FORWARD -i '%(name)s' -o '%(name)s' -j drop\", interface)\n\n # Route incoming traffic to zone-specific forward 
chains\n printf(\"iptables -A FORWARD -i '%(name)s' -j '%(zone)s_fwd'\", interface)\n printf(\"ip6tables -A FORWARD -i '%(name)s' -j '%(zone)s_fwd'\", interface)\n\n # Generate rules to implement filtering\n\n for rule in all_rules:\n # cmd is a dictionary that contains all the necessary building blocks for\n # an iptables command.\n # We're gonna pass it through a bunch of generators that each yield a\n # number of combinations for ipv4/ipv6 addresses, tcp/udp services and\n # accept/masquerade rules.\n # So the number of combinations grows with each step along the way.\n # At the end, every combination gets passed into render_cmd which\n # turns it into a string.\n\n def iptables(cmd=None):\n yield dict(cmd=\"iptables\")\n yield dict(cmd=\"ip6tables\")\n\n def chains(cmd):\n # Find out which input/forward chains we need to use\n if rule.dstzone == \"ALL\":\n dstzones = all_zones | {\"FW\"}\n else:\n dstzones = [rule.dstzone]\n\n for dstzone in dstzones:\n # Destination ALL or FW: goto _inp\n if dstzone in (\"FW\", \"ALL\"):\n yield dict(cmd,\n chain=\"%s_inp\" % rule.srczone,\n iface=\"\"\n )\n\n # Destination ALL or specific zone: goto _fwd\n for interface in all_interfaces:\n if dstzone in (interface.zone, \"ALL\"):\n yield dict(cmd,\n chain=\"%s_fwd\" % rule.srczone,\n iface=interface.name\n )\n\n def address(addr, direction):\n def _filter_addr(cmd):\n if addr == \"ALL\":\n yield cmd\n elif cmd[\"cmd\"] == \"iptables\":\n if all_addresses[addr].v4 != '-':\n yield dict(cmd, **{ \"%saddr\" % direction : \"%s_v4\" % addr })\n else:\n if all_addresses[addr].v6 != '-':\n yield dict(cmd, **{ \"%saddr\" % direction : \"%s_v6\" % addr })\n\n return _filter_addr\n\n def service(cmd):\n if rule.service == \"ALL\":\n yield cmd\n else:\n if all_services[rule.service].tcp != '-':\n yield dict(cmd, service='%s_tcp' % rule.service, proto=\"tcp\")\n if all_services[rule.service].udp != '-':\n yield dict(cmd, service='%s_udp' % rule.service, proto=\"udp\")\n\n def 
action(cmd):\n action = \"accept\" if rule.action == \"accept+nat\" else rule.action\n yield dict(cmd, action=action)\n\n def masq(cmd):\n yield cmd\n if rule.action == \"accept+nat\":\n yield dict(cmd, table=\"nat\", chain=\"POSTROUTING\", action=\"MASQUERADE\")\n\n def render_cmd(cmd):\n fmt = \"%(cmd)-9s \"\n if cmd.get(\"table\"):\n fmt += \"-t '%(table)s' \"\n fmt += \"-A '%(chain)s' \"\n if cmd.get(\"iface\"):\n fmt += \"-o '%(iface)s' \"\n if cmd.get(\"srcaddr\"):\n fmt += \"-m set --match-set '%(srcaddr)s' src \"\n if cmd.get(\"dstaddr\"):\n fmt += \"-m set --match-set '%(dstaddr)s' dst \"\n if cmd.get(\"service\"):\n fmt += \"-p '%(proto)s' -m set --match-set '%(service)s' dst \"\n fmt += \"-j %(action)s\"\n yield fmt % cmd\n\n # Create a pipeline of steps ready to be consumed by reduce.\n # The first element we need to invoke manually.\n # The others are invoked by chain_gen.\n pipeline = [\n iptables(),\n chains,\n address(rule.srcaddr, \"src\"),\n address(rule.dstaddr, \"dst\"),\n service,\n action,\n masq,\n render_cmd\n ]\n\n # Now reduce() the pipeline to generate the actual commands.\n for command in reduce(chain_gen, pipeline):\n print(command)\n\n\n # Generate rules to implement virtual services\n\n for virtual in all_virtuals:\n def iptables(cmd=None):\n yield dict(cmd=\"iptables\")\n yield dict(cmd=\"ip6tables\")\n\n def interfaces(cmd):\n for interface in all_interfaces:\n if interface.zone == virtual.srczone:\n yield dict(cmd, iface=interface.name)\n\n def address(addr, which_one):\n def _filter_addr(cmd):\n if cmd[\"cmd\"] == \"iptables\":\n if all_addresses[addr].v4 != '-':\n yield dict(cmd, **{ \"%saddr\" % which_one : all_addresses[addr].v4 })\n else:\n if all_addresses[addr].v6 != '-':\n yield dict(cmd, **{ \"%saddr\" % which_one : all_addresses[addr].v6 })\n return _filter_addr\n\n def service(service, which_one):\n def _filter_service(cmd):\n if service == \"ALL\":\n yield cmd\n else:\n if all_services[service].tcp != '-':\n yield 
dict(cmd, proto=\"tcp\", **{\n \"%sservice\" % which_one : all_services[service].tcp,\n })\n if all_services[service].udp != '-':\n yield dict(cmd, proto=\"udp\", **{\n \"%sservice\" % which_one : all_services[service].udp,\n })\n return _filter_service\n\n def render_cmd(cmd):\n fmt_dnat = \"%(cmd)s -t 'nat' -A 'PREROUTING' -i '%(iface)s' -d '%(extaddr)s' \"\n fmt_fltr = \"%(cmd)s -t 'filter' -A 'FORWARD' -i '%(iface)s' -d '%(intaddr)s' \"\n\n if cmd.get(\"extservice\"):\n fmt_dnat += \"-p '%(proto)s' -m '%(proto)s' --dport '%(extservice)s' \"\n fmt_fltr += \"-p '%(proto)s' -m '%(proto)s' --dport '%(intservice)s' \"\n\n if virtual.intservice == virtual.extservice:\n fmt_dnat += \"-j DNAT --to-destination '%(intaddr)s'\"\n fmt_fltr += \"-j ACCEPT\"\n else:\n fmt_dnat += \"-j DNAT --to-destination '%(intaddr)s:%(intservice)s'\"\n fmt_fltr += \"-j ACCEPT\"\n\n yield fmt_dnat % cmd\n yield fmt_fltr % cmd\n\n pipeline = [\n iptables(),\n interfaces,\n address(virtual.extaddr, \"ext\"),\n address(virtual.intaddr, \"int\"),\n service(virtual.extservice, \"ext\"),\n service(virtual.intservice, \"int\"),\n render_cmd\n ]\n\n # Now reduce() the pipeline to generate the actual commands.\n for command in reduce(chain_gen, pipeline):\n print(command)\n\n # Generate last-resort reject rules\n\n print(\"iptables -A INPUT -j reject\")\n print(\"ip6tables -A INPUT -j reject\")\n print(\"iptables -A FORWARD -j reject\")\n print(\"ip6tables -A FORWARD -j reject\")\n\n\n\nif __name__ == '__main__':\n generate_setup()\n\n","sub_path":"src/generate_setup.py","file_name":"generate_setup.py","file_ext":"py","file_size_in_byte":18416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"421654868","text":"from django.conf.urls.defaults import patterns, url\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\n\nurlpatterns = patterns('app.asphodel.views',\n url(r'^$', 'user_status'),\n url(r'^game/new/?$', 'new_game'),\n url(r'^game/(?P\\d+)/?$', 'game'),\n\n url(r'(?P\\d+)/lanes/build$', 'build_lane'),\n \n url(r'(?P\\d+)/ships/move$', 'move_ship'),\n url(r'(?P\\d+)/ships/build$', 'build_ship'),\n url(r'(?P\\d+)/ships/deploy$', 'deploy_ship'),\n\n url(r'(?P\\d+)/tech/invest$', 'invest_tech'),\n \n url(r'(?P\\d+)/change_phase$', 'change_phase'),\n url(r'(?P\\d+)/done_turn$', 'done_turn'),\n url(r'(?P\\d+)/zoom$', 'zoom'),\n url(r'(?P\\d+)/combat$', 'combat'),\n url(r'(?P\\d+)/check_turn$', 'check_turn'),\n \n url(r'test_js', TemplateView.as_view(template_name=\"asphodel/test_js.html\")),\n)\n \n","sub_path":"app/asphodel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"530720037","text":"from tkinter import *\nfrom dipy.viz import fvtk\nfrom dipy.tracking.distances import bundles_distances_mam\nfrom nibabel import trackvis\nfrom dipy.tracking.utils import length\nimport numpy as np\nimport nibabel\nimport os\nimport vtk.util.colors as colors\nimport _tkinter\nimport matplotlib.pyplot as plt\n\n\n\n\n\ndef loadtrkfile(T_filename, threshold_short_streamlines=10.0):\n \"\"\"Load tractogram from TRK file and remove short streamlines with\n length below threshold.\n \"\"\"\n print(\"Loading %s\" % T_filename)\n T, hdr = trackvis.read(T_filename, as_generator=False)\n T = np.array([s[0] for s in T], dtype=np.object)\n \n\n \n return T, hdr\n\n\n\ndef show_tract(segmented_tract, color):\n ren = fvtk.ren() \n fvtk.add(ren, fvtk.line(segmented_tract.tolist(),colors=color, linewidth=2,opacity=0.3))\n fvtk.show(ren)\n fvtk.clear(ren)\n\n\n\n\ndef countstreamlines():\n print(\"total %s streamlines\" % ( len(T_A)))\n\n\n\ndef showhistogram():\n lengths = list(length(T_A))\n fig_hist, ax = plt.subplots()\n ax.hist(lengths, color='burlywood')\n ax.set_xlabel('Length')\n ax.set_ylabel('Count')\n plt.show()\n\ndef load():\n T_A, hdr = loadtrkfile(T_A_filename, threshold_short_streamlines=threshold_short_streamlines) \n \n\nif __name__ == '__main__':\n \n print(__doc__)\n np.random.seed(0)\n\n T_A_filename = 'F:\\Thesis\\Resources\\CST_L.trk'\n \n \n threshold_short_streamlines = 0.0 \n\n \n \n color=colors.red\n T_A, hdr = loadtrkfile(T_A_filename, threshold_short_streamlines=threshold_short_streamlines) \n \n \n root = Tk()\n Frame= Frame(root)\n Frame.pack(fill=X)\n\n\n button1=Button(Frame,text=\"Load Tract\",fg=\"blue\",command=load)\n button2=Button(Frame,text=\"Show Tract\",fg=\"blue\",command=show_tract(T_A,color))\n button3=Button(Frame,text=\"Streamlines Count\",fg=\"blue\",command=countstreamlines)\n button4=Button(Frame,text=\"Show histogram\",fg=\"blue\",command=showhistogram)\n\n button1.pack(fill=X)\n 
button2.pack(fill=X)\n button3.pack(fill=X)\n button4.pack(fill=X)\n\n\n\n\n root.mainloop()\n\n\n","sub_path":"firsttask.py","file_name":"firsttask.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"173052328","text":"from datetime import datetime\n\nOFF = 0\nINFO = 5\nDEBUG = 10\nWARNING = 15\nERROR = 20\nCRITICAL = 25\n\nlevels = {}\nlevels[\"OFF\"] = OFF\nlevels[\"INFO\"] = INFO\nlevels[\"DEBUG\"] = DEBUG\nlevels[\"WARNING\"] = WARNING\nlevels[\"ERROR\"] = ERROR\nlevels[\"CRITICAL\"] = CRITICAL\n\ndef isInfo(level):\n return isDebug(level) or level == INFO\n\ndef isDebug(level):\n return isWarning(level) or level == DEBUG\n\ndef isWarning(level):\n return isError(level) or level == WARNING\n\ndef isError(level):\n return isCritical(level) or level == ERROR\n\ndef isCritical(level):\n return level == CRITICAL\n\ndef _getTime():\n return str(datetime.now())[:22]\n\nclass logger():\n def __init__(self, script=\"Main\", file=INFO, screen=INFO, path=\"log.txt\"):\n self.script = script\n self.file = file\n self.screen = screen\n self.path = path\n\n def new(self):\n f = open(self.path, \"w\")\n f.close()\n\n def _construct(self, level, message):\n return _getTime() + \" - \" + level + \" [\" + self.script + \"] \" + message\n\n def info(self, message):\n temp = self._construct(\"INFO\", message)\n if isInfo(self.file):\n f = open(self.path, \"a\")\n f.write(temp + \"\\n\")\n f.close()\n if isInfo(self.screen):\n print(temp)\n \n\n def debug(self, message):\n temp = self._construct(\"DEBUG\", message)\n if isDebug(self.file):\n f = open(self.path, \"a\")\n f.write(temp + \"\\n\")\n f.close()\n if isDebug(self.screen):\n print(temp)\n\n def warning(self, message):\n temp = self._construct(\"WARNING\", message)\n if isWarning(self.file):\n f = open(self.path, \"a\")\n f.write(temp + \"\\n\")\n f.close()\n if isWarning(self.screen):\n print(temp)\n\n def error(self, message):\n temp = self._construct(\"ERROR\", message)\n if isError(self.file):\n f = open(self.path, \"a\")\n f.write(temp + \"\\n\")\n f.close()\n if isError(self.screen):\n print(temp)\n\n def critical(self, message):\n temp = self._construct(\"CRITICAL\", message)\n if 
isCritical(self.file):\n f = open(self.path, \"a\")\n f.write(temp + \"\\n\")\n f.close()\n if isCritical(self.screen):\n print(temp)\n","sub_path":"python_src/engine/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"330925049","text":"boys = ['Peter', 'Alex', 'John', 'Arthur', 'Richard']\ngirls = ['Kate', 'Liza', 'Kira', 'Emma', 'Trisha']\n\ndef pairs_list(list1, list2):\n if len(list1) != len(list2):\n print(\"К сожалению один список короче другого, следовательно, кто-то останется без пары\")\n return 0\n else:\n ideal_pairs = zip(list1, list2)\n print(\"Идеальные пары:\")\n for pair in ideal_pairs:\n print(pair[0], \"и\", pair[1])\n\npairs_list(sorted(boys),sorted(girls))\n","sub_path":"dating.py","file_name":"dating.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"407645048","text":"from sklearn.neighbors import KNeighborsClassifier\nimport numpy as np\nfrom graphs.graphs import *\n\n\ndef build_affinity_matrix(embed_matrix):\n \n aff_mat = None\n \n return aff_mat\n\n\ndef KNN(X, y, n):\n l = len(y)\n y_hat = []\n \n for i in range(l): \n X_train = np.delete(X, i, axis = 0) \n y_train = np.delete(y, i, axis = 0) \n \n neigh = KNeighborsClassifier(n_neighbors = n, n_jobs = -1)\n neigh.fit(X_train, y_train)\n y_hat.extend(neigh.predict(X[i].reshape(1,-1)))\n \n print(\"%d-NN\" %n)\n print(sum(np.array(y_hat) == y) / l)\n return sum(np.array(y_hat) == y) / l\n\ndef KNN_large(X, y, n):\n l = len(y)\n y_hat = []\n \n for i in range(0,l,20):\n \n batch_size = 20\n if i + 20 >= l:\n batch_size = l - i\n \n to_predict = X[i:i+batch_size, :]#.reshape(1,-1)\n X_train = np.delete(X, np.arange(i,i + batch_size), axis = 0)\n y_train = np.delete(y, np.arange(i,i + batch_size), axis = 0)\n \n neigh = KNeighborsClassifier(n_neighbors = n)\n neigh.fit(X_train, y_train)\n \n y_hat.extend(neigh.predict(to_predict))\n \n print(\"%d-NN\" %n)\n print(sum(np.array(y_hat) == y) / l)\n\n \ndef prepare_adj(df, method = 'gaussian', sig = 1, alpha = 1, delta = 20, lazy_flag = True):\n\n \"\"\"\n Input: Adjacency matrix or feature matrix with the last column including the labels\n Output: Row normalized gaussian kernel similarity matrix\n \"\"\"\n X = df.values[:,:-1] #consider X a graph or a feature matrix, both fine\n np.fill_diagonal(X,0) #set diagonal to zero / remove self loops\n Q_index = range(X.shape[0]) # for now always use this\n\n dis = distanceEuclidean(X, Q_index, n_jobs=-1)\n similarity = kerGauss(dis, sigma = sig) #try different sigma\n\n # origianl similarity matrix, using gaussian kernel, row normalize\n if method == 'gaussian':\n graph = RandomWalkNormalize(similarity)\n \n elif method == 'MSTKNN':\n A_KNN = MSTKNN(dis,Q_index,delta,n_jobs=-1,spanning=True)\n A_KNN_ker = A_KNN*similarity\n graph = 
RandomWalkNormalize(A_KNN_ker)\n \n elif method == 'nnlsw':\n A_KNN = MSTKNN(dis,Q_index,delta,n_jobs=-1,spanning=True)\n graph = multicoreNNLS(X,A_KNN,Q_index,n_jobs=-1)\n \n if lazy_flag:\n graph = lazy(graph, alpha= alpha) # convert to lazy\n \n return graph\n\ndef apply_laplacian(graph):\n\n graph = Symmetricalize(graph)\n graph = LaplacianFilter(graph)\n graph = graph.toarray()\n \n return graph\n\ndef get_train_and_val_mask(train_mask, val_size):\n \n \"\"\"\n Input: Indices of the p rows in the Domain matrix\n \n split the training set into training and validation sets\n train_ind and val_ind are used to filter the base_embeddings during training\n \"\"\"\n p = len(train_mask)\n indices = np.arange(p)\n np.random.shuffle(indices)\n \n train_lim = int((1-val_size)* p) # 90% training, 10% validation\n \n train_ind = [indices[i] for i in range(train_lim)]\n val_ind = [indices[i] for i in range(train_lim, p)]\n \n val_mask = [train_mask[i] for i in val_ind]\n train_mask = [train_mask[i] for i in train_ind]\n \n return train_mask, val_mask, train_ind, val_ind\n ","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"226407804","text":"# coding:iso-8859-9 Türkçe\r\n# p_20902.py: Grafik yumruları ve bağlantıları, doğrusal ve dairesel grafik çıktıları örneği.\r\n\r\nimport networkx as nwx\r\nimport matplotlib.pyplot as pp\r\n\r\ngrafik1 = nwx.path_graph (6) # grafik1 dizilimi: [5<--0], sağdan-sola...\r\nşehirler = {0:\"Edirne\", 1:\"İstanbul\", 2:\"Ankara\", 3:\"Malatya\", 4:\"Sivas\", 5:\"Kars\"}\r\ngrafik2 = nwx.relabel_nodes (grafik1, şehirler) # grafik2 dizilimi: [Kars<--Edirne], sağdan-sola\r\n# grafik2 yeniden yaratılır, grafik1 etkilenmez...\r\n# Grafiklerde maalesef isimler görünmüyor, MS Paint ile etiketlenmeli...\r\n\r\nprint (\"Grafik1'in yumruları:\", grafik1.nodes() )\r\nprint (\"Grafik1'in bağlantıları:\", grafik1.edges() )\r\n\r\nprint (\"\\nGrafik2'in yumruları:\", grafik2.nodes() )\r\nprint (\"Grafik2'in bağlantıları:\", grafik2.edges() )\r\n\r\nnwx.draw (grafik2)\r\n#pp.savefig (\"p_20902a.png\")\r\npp.show()\r\n\r\nprint (\"-\"*75)\r\n#--------------------------------------------------------------------------------------------------\r\n\r\nharitalama1 = dict (zip (grafik1.nodes(), \"abc\"))\r\nnwx.relabel_nodes (grafik1, haritalama1, copy=False) # grafik1 yeni kısagelen isimlerle değiştirilir...\r\n\r\nprint (\"Grafik1'in yumruları:\", grafik1.nodes() )\r\nprint (\"Grafik1'in bağlantıları:\", grafik1.edges() )\r\nprint (\"-\"*75)\r\n#--------------------------------------------------------------------------------------------------\r\n\r\n\r\ngrafik1 = nwx.path_graph (6)\r\nharitalama2 = dict (zip (grafik1.nodes(), (100,101,102,103,104,105) ))\r\nnwx.relabel_nodes (grafik1, haritalama2, copy=False) # grafik1 haritalama fonksiyonuyla tamamen ismen değişir...\r\ngrafik1.add_edge (100, 105) # Sondan başa bağlantı...\r\n\r\nprint (\"Grafik1'in yumruları:\", grafik1.nodes() )\r\nprint (\"Grafik1'in bağlantıları:\", grafik1.edges() )\r\n\r\nnwx.draw (grafik1)\r\n#pp.savefig (\"p_20902b.png\")\r\npp.show()\r\n\r\n\r\n\r\n\"\"\"Çıktı:\r\n>python 
p_20902.py\r\nGrafik1'in yumruları: [0, 1, 2, 3, 4, 5]\r\nGrafik1'in bağlantıları: [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]\r\n\r\nGrafik2'in yumruları: ['Edirne', 'İstanbul', 'Ankara', 'Malatya', 'Sivas', 'Kars']\r\nGrafik2'in bağlantıları: [('Edirne', 'İstanbul'), ('İstanbul', 'Ankara'), ('Ankara', 'Malatya'), ('Malatya', 'Sivas'), ('Sivas', 'Kars')]\r\n\r\nnx_pylab.py:579: MatplotlibDeprecationWarning:\r\nThe iterable function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use np.iterable instead. if not cb.iterable(width):\r\n---------------------------------------------------------------------------\r\n\r\nGrafik1'in yumruları: [3, 4, 5, 'a', 'b', 'c']\r\nGrafik1'in bağlantıları: [(3, 4), (3, 'c'), (4, 5), ('a', 'b'), ('b', 'c')]\r\n---------------------------------------------------------------------------\r\n\r\nGrafik1'in yumruları: [100, 101, 102, 103, 104, 105]\r\nGrafik1'in bağlantıları: [(100, 101), (100, 105), (101, 102), (102, 103), (103,104), (104, 105)]\r\n\"\"\"","sub_path":"Bernd Klein (520) ile Python/p_20902.py","file_name":"p_20902.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"461246903","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nmatplotlib.use(\"Agg\")\r\nimport pylab\r\nfrom collections import deque\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.optimizers import RMSprop\r\nfrom ple import PLE\r\nfrom ple.games.catcher import Catcher\r\nfrom pygame.constants import K_a, K_d\r\n\r\nos.putenv('SDL_VIDEODRIVER', 'fbcon')\r\nos.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\r\nEPISODES = 100000\r\nnp.random.seed(0)\r\n\r\n\r\ndef process_state(state):\r\n return np.array([state.values()])\r\n\r\n\r\nclass DQNAgent:\r\n def __init__(self, env):\r\n self.env = env\r\n self.memory = deque(maxlen=200000)\r\n self.gamma = 0.99\r\n self.epsilon = 1\r\n self.epsilon_min = 0.05\r\n self.epsilon_decay = 0.9984\r\n self.learning_rate = 1e-5\r\n self._build_model()\r\n\r\n def _build_model(self):\r\n model = Sequential()\r\n model.add(Dense(100, input_dim=4, activation='tanh', init='he_uniform'))\r\n model.add(Dense(100, activation='tanh', init='he_uniform'))\r\n model.add(Dense(3, activation='linear', init='he_uniform'))\r\n model.compile(loss='mse',\r\n optimizer=RMSprop(lr=self.learning_rate))\r\n self.model = model\r\n\r\n\r\n\r\n def remember(self, state, action, reward, next_state, done): #메모리 저장\r\n self.memory.append((state, action, reward, next_state, done))\r\n\r\n def act(self, state):\r\n if np.random.rand() <= self.epsilon:\r\n return np.random.choice([K_a, None, K_d])\r\n act_values = self.model.predict(state)\r\n # print (act_values)\r\n return [K_a, None, K_d][np.argmax(act_values[0])]\r\n\r\n def replay(self, batch_size):\r\n if len(self.memory) < 120000: #메모리 사이즈가 120000 이하면 학습 안함\r\n return\r\n batchs = np.random.choice(len(self.memory), batch_size, replace=False) #배치사이즈만큼 메모리 랜덤하게 가져오기\r\n states, targets = [], []\r\n for i in batchs:\r\n state, action, reward, next_state, done = self.memory[i]\r\n #if 
not done:\r\n target = reward + self.gamma * \\\r\n np.amax(self.model.predict(next_state)[0])\r\n\r\n target_f = self.model.predict(state)\r\n target_f[0][action] = target\r\n states.append(state[0])\r\n targets.append(target_f[0])\r\n states = np.array(states)\r\n targets = np.array(targets)\r\n self.model.fit(states, targets, nb_epoch=1, verbose=0) # 학습하기\r\n if self.epsilon > self.epsilon_min:\r\n self.epsilon *= self.epsilon_decay\r\n\r\n def load(self, name): # 학습된 네트워크 로드\r\n self.model.load_weights(name)\r\n\r\n def save(self, name): # 네트워크 저장\r\n self.model.save_weights(name)\r\n\r\nif __name__ == \"__main__\":\r\n game = Catcher(width=320, height=320)\r\n env = PLE(game, display_screen=True, state_preprocessor=process_state)\r\n agent = DQNAgent(env)\r\n agent.load(\"./save/catcher.h5\")\r\n\r\n #초기화\r\n #pylab.title(\"reward\")\r\n #pylab.xlabel(\"episodes\")\r\n #pylab.ylabel(\"rewards\")\r\n env.init()\r\n scores, time = [], []\r\n for e in range(EPISODES):\r\n\r\n env.reset_game()\r\n state = env.getGameState()\r\n state = np.array([list(state[0])])\r\n score = 0\r\n for time_t in range(20000):\r\n action = agent.act(state)\r\n\r\n reward = env.act(action) #액션 선택\r\n score += reward\r\n\r\n next_state = env.getGameState()\r\n next_state = np.array([list(next_state[0])])\r\n\r\n action = [K_a, None, K_d].index(action)\r\n\r\n agent.remember(state, action, reward, next_state, env.game_over())\r\n state = next_state\r\n\r\n if env.game_over() or time_t == 19999:\r\n #에피소드가 끝나면 출력\r\n print(\"episode: {}/{}, score: {}, memory size: {}, e: {}\"\r\n .format(e, EPISODES, score,\r\n len(agent.memory), agent.epsilon))\r\n\r\n #리워드 플랏을 위한 코드\r\n scores.append(score)\r\n time.append(e+1)\r\n if e % 10 == 0:\r\n pylab.plot(time, scores, 'b')\r\n pylab.savefig(\"./save/catcher_dqn.png\")\r\n break\r\n\r\n if e % 100 == 0:\r\n agent.save(\"./save/catcher.h5\")\r\n\r\n if time_t % 4 == 3:\r\n 
agent.replay(32)\r\n","sub_path":"deep-q-learning/catcher_DQN_plot.py","file_name":"catcher_DQN_plot.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"64041197","text":"# from sqlalchemy.orm import Session, session\n\nfrom api import models, schema\n\nfrom api.db import bookingslips, database\n\n\n\n###########################BOOKING SLIP#####################################\n\nasync def add_slip(source='', destination='', booking_code='', new_booking_code=''):\n query = bookingslips.insert().values(source=source, destination=destination, booking_code=booking_code, new_booking_code=new_booking_code)\n return await database.execute(query=query)\n\nasync def get_slip(booking_code):\n query = bookingslips.select(bookingslips.c.booking_code==booking_code)\n return await database.fetch_one(query=query)\n\nasync def get_slip_detail(source, destination, booking_code):\n query = bookingslips.select().where(bookingslips.c.source==source).where(bookingslips.c.destination==destination).where(bookingslips.c.booking_code==booking_code)\n return await database.fetch_one(query=query)\n\nasync def get_slips(skip: int = 0, limit: int = 10):\n query = bookingslips.select().order_by(bookingslips.c.id.desc()).offset(skip).limit(limit)\n return await database.fetch_all(query=query)\n\nasync def delete_slip(id: int):\n query = bookingslips.delete().where(bookingslips.c.id==id)\n return await database.execute(query=query)\n\nasync def update_slip(id: int, payload: schema.BookingSlipCreate): #I can't think of a possible usecase for now\n query = (\n bookingslips\n .update()\n .where(bookingslips.c.id == id)\n .values(**payload.dict())\n )\n return await database.execute(query=query)\n\n\n\n###########################BOOKING SLIP#####################################\n\n# async def get_slips(db: Session, skip: int = 0, limit: int = 100):\n# return db.query(models.BookingSlip).offset(skip).limit(limit).all()\n\n# async def get_slip(booking_code: str):\n# query = models.BookingSlip.filter(models.BookingSlip.booking_code == booking_code).first()\n# return await database.execute(query=query)\n# async def get_slip_detail(db: 
Session, booking_code: str, source: str, destination: str):\n# return db.query(models.BookingSlip).filter(models.BookingSlip.booking_code == booking_code)\\\n# .filter(models.BookingSlip.source == source).filter(models.BookingSlip.destination == destination).first()\n\n# async def create_slip(db: Session, source='', destination='', booking_code='', new_booking_code=''):\n# db_slip = models.BookingSlip(source=source, destination=destination, booking_code=booking_code, new_booking_code=new_booking_code)\n# db.add(db_slip)\n# db.commit()\n# db.refresh(db_slip)\n# return db_slip\n \n# async def create_slip_convert(db: Session, _convert: schema.ConvertedSlipCreate, bookingslip_id: int):\n# db_convert = models.ConvertedSlip(**_convert.dict(), booking_slip_id=bookingslip_id)\n# db.add(db_convert)\n# db.commit()\n# db.refresh(db_convert)\n# return db_convert\n\n","sub_path":"api/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"99195938","text":"#original idea from https://gist.github.com/frederic/cd56923c8af46ae44fd5\n#GPLv3\n\nimport struct #struct kinda sucks\nimport sys\n\n# /* msm8960 bootloader.img format */\n# #define BOOTLDR_MAGIC \"BOOTLDR!\"\n# #define BOOTLDR_MAGIC_SIZE 8\n#\n# typedef struct {\n# \tchar name[64];\n# \tuint32_t size;\n# }img_info_s;\n#\n# typedef struct {\n# \tchar magic[BOOTLDR_MAGIC_SIZE];\n# \tuint32_t num_images;\n# \tuint32_t start_offset;\n# \tuint32_t bootldr_size;\n# \timg_info_s img_info[];\n# }bootloader_images_header;\n\ncomplete_bootloader = open('binaries/bootloader-mako-makoz30f.img','rb').read()\nmagic = complete_bootloader[0:8]\n\nif (magic != bytes(\"BOOTLDR!\",'ascii')):\n print(\"magic was supposed to be BOOTLDR!, got {0} instead\".format(magic))\n sys.exit(1)\n\nnumber_of_images = int.from_bytes(complete_bootloader[8:12], byteorder='little')\nstart_offset = int.from_bytes(complete_bootloader[12:16], byteorder='little')\nbootloader_size = int.from_bytes(complete_bootloader[16:20], byteorder='little')\n\nprint(\"Read BOOTLDR header\")\nprint(\"File size: {0}, bootloader size: {1}\".format(len(complete_bootloader),bootloader_size))\nprint(\"Calculated offset: {0}\".format(len(complete_bootloader)-bootloader_size))\nprint(\"Expecting {0} images\".format(number_of_images))\nprint(\"Start offset: {0}\".format(start_offset))\n\nposition=start_offset\nfor i in range(0,number_of_images):\n info_size = 68 #probably shouldn't hardcode this\n info_start = 20+info_size*i\n info_end = info_start+info_size\n info = complete_bootloader[info_start:info_end]\n image_name = info[0:64].decode('utf-8').rstrip('\\0')\n image_size = int.from_bytes(info[64:68],byteorder='little')\n print(\"{0}: {1} ({2}) at offset {3}\".format(i, image_name,image_size, position))\n bin = open('binaries/'+image_name + '.bin','wb').write(complete_bootloader[position:position+image_size])\n 
position+=image_size\n\n","sub_path":"Python/nexus4-bootloader-tool/unbootldr.py","file_name":"unbootldr.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"436725587","text":"__author__ = 'vincent'\r\nclass Solution(object):\r\n def removeDuplicates(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n Another typical two index problem.\r\n \"\"\"\r\n\r\n if not nums: return 0;\r\n if len(nums) < 3: return len(nums)\r\n\r\n dupFlag = False\r\n count = 1\r\n\r\n for i in xrange(1, len(nums)):\r\n if nums[i] == nums[i-1]:\r\n if not dupFlag:\r\n nums[count] = nums[i]\r\n count+=1\r\n dupFlag = True\r\n else:\r\n dupFlag = False\r\n nums[count] = nums[i]\r\n count += 1\r\n return count\r\n\r\n\r\n\r\n","sub_path":"80_Remove Duplicates from Sorted Array II.py","file_name":"80_Remove Duplicates from Sorted Array II.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"344379259","text":"import bpy\nimport numpy as np\nimport sys\nsys.path.append(\".\")\n\n#デスクトップ型パソコンのパスの設定\nsys.path.append(\"/Users/hiroshi19/Documents/research_git/calibration\")\nimport cv2 as cv\nimport glob\nimport calibration as cb\n\n#print (np(モジュール名).__file__)でモジュールの場所を調べる\n\n\n\n#ビューに選択されているものを選択\nbpy.ops.object.select_all(action='SELECT')\n\n#すべて選択された状態になったら削除\nbpy.ops.object.delete(True)\n\n#カメラを追加\n#locationはメートル単位\n#rotationは3.14(円周率)でちょうど半周\nbpy.ops.object.camera_add(\n location = (7,0, 0.5),\n rotation = (1.65,0,1.9)\n)\n\n\n#イメージセンサの大きさを求める\ndef get_sensor_size(pint, pint_35, pix_x, pix_y):\n\n #センサーの対角線の長さを求める\n sensor_diagonal = 2 * pint * np.tan(np.arctan2(21.6335,pint_35))\n print(sensor_diagonal)\n\n #センサーの横の長さを求める→引数の解像度から、タテヨコ比を計算して、算出\n width = pix_x * (sensor_diagonal / np.sqrt((pix_x*pix_x) + (pix_y*pix_y)))\n #センサーの縦の長さを求める→引数の解像度から、タテヨコ比を計算して、算出\n height = pix_y * (sensor_diagonal / np.sqrt((pix_x*pix_x) + (pix_y*pix_y)))\n\n return width, height\n\n\n\n\n\n#内部パラメータ行列について\n#行列の変換関数(blenderカメラデータ→内部パラメータ行列)\ndef get_intrinsicMatrix(camera_data):\n #焦点距離\n focus_mm = camera_data.lens\n\n scene = bpy.context.scene\n resolution_ratio = scene.render.resolution_percentage / 100\n\n #センサーの物理的大きさ(mm)\n sensor_width_mm = camera_data.sensor_width\n sensor_height_mm = camera_data.sensor_height\n #ピクセルのタテヨコ比\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n\n if (camera_data.sensor_fit == 'VERTICAL'):\n #verticalは鉛直方向という意味で、sensor_heightが固定\n #uが横方向、vが縦方向\n scale_u = scene.render.resolution_x * resolution_ratio / sensor_width_mm\n scale_v = scene.render.resolution_y * resolution_ratio * pixel_aspect_ratio / sensor_height_mm\n else:\n #'HORIZONTAL' and 'AUTO'\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n scale_u = scene.render.resolution_x * resolution_ratio / sensor_width_mm\n scale_v = scene.render.resolution_y * resolution_ratio * pixel_aspect_ratio / 
sensor_height_mm\n\n alpha_u = focus_mm * scale_u\n alpha_v = focus_mm * scale_v\n u_0 = scene.render.resolution_x * resolution_ratio / 2\n v_0 = scene.render.resolution_y * resolution_ratio / 2\n skew = 0\n\n kmat = np.zeros((3,3))\n kmat[0,0] = alpha_u\n kmat[1,1] = alpha_v\n kmat[0,2] = u_0\n kmat[1,2] = v_0\n return kmat\n\n\n#blenderカメラデータの代入\ndef change_cameraData(RES_X, RES_Y, RES_PERCENT, PIX_ASPECT_X, PIX_ASPECT_Y, SENSOR_W_mm, SENSOR_H_mm):\n\n #レンダリングパラメータの設定\n bpy.context.scene.render.resolution_x = RES_X\n bpy.context.scene.render.resolution_y = RES_Y\n bpy.context.scene.render.resolution_percentage = RES_PERCENT\n bpy.context.scene.render.pixel_aspect_x = PIX_ASPECT_X\n bpy.context.scene.render.pixel_aspect_y = PIX_ASPECT_Y\n\n #カメラデータの設定\n bpy.data.objects[\"Camera\"].data.sensor_width = SENSOR_W_mm\n bpy.data.objects[\"Camera\"].data.sensor_height = SENSOR_H_mm\n\n\n\n\n#行列の変換関数(内部パラメータ行列→blenderカメラデータ)\n#行列から解像度を設定\ndef get_cameraData(kmat, sensor_size_x, sensor_size_y):\n\n #行列の値を変数に代入\n alpha_u = kmat[0,0]\n alpha_v = kmat[1,1]\n u_0 = kmat[0,2]\n v_0 = kmat[1,2]\n\n\n scene = bpy.context.scene\n camera_data = bpy.data.objects['Camera'].data\n resolution_ratio = scene.render.resolution_percentage / 100\n\n res_x_inBlender = u_0 * 2 / resolution_ratio\n res_y_inBlender = v_0 * 2 / resolution_ratio\n\n #素子(イメージセンサ)サイズ(mm)\n camera_data.sensor_width = sensor_size_x\n camera_data.sensor_height = sensor_size_y\n #ピクセルのタテヨコ比\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n\n #センサーフィットの設定\n camera_data.sensor_fit = 'AUTO'\n\n #スケールパラメータ(センサー上での1mmあたりのピクセル数)の設定\n scale_u = u_0 * 2 / camera_data.sensor_width\n scale_v = v_0 * 2 / camera_data.sensor_height\n\n #ピクセル単位の焦点距離をmm単位の焦点距離に変換\n focus_1 = alpha_u / scale_u\n focus_2 = alpha_v / scale_v\n\n print('focus_length1', focus_1)\n print('focus_length2', focus_2)\n\n\nif __name__ == '__main__':\n\n\n #センサーサイズの取得(解像度の入力はセンサーのタテヨコ比を計算するため)\n sensor_size_x, sensor_size_y = 
get_sensor_size(4.15,29,4030,3058)\n\n print(\"イメージセンサ サイズ\")\n print(\"横:\",sensor_size_x)\n print(\"縦:\",sensor_size_y)\n\n #キャリブレーション\n PATTERN_WIDTH = 10\n PATTERN_HEIGHT = 7\n checker_size = 23\n pic_location = './data_3/*.JPG'\n images, image_numbers, kmat, dist, rvecs, tvecs, objectpoints, imagepoints, rets = cb.cameraCalibration(PATTERN_WIDTH,PATTERN_HEIGHT,checker_size,pic_location)\n\n #キャリブレーション結果からblenderデータの導出\n print('blenderデータの導出')\n get_cameraData(kmat, sensor_size_x, sensor_size_y)\n","sub_path":"camera_parTobpy_data.py","file_name":"camera_parTobpy_data.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"256971773","text":"\n# Standard library imports\nimport unittest\n\nimport os\n\n# Local imports\nfrom webasic.models.database import DataBase, SQLITE\nfrom webasic.models.person import Person\n\n\nclass DataBaseTest(unittest.TestCase):\n db = None\n\n @classmethod\n def setUpClass(cls):\n if os.path.exists('test.sqlite'):\n try:\n os.remove('test.sqlite')\n except:\n pass\n DataBaseTest.db = DataBase(SQLITE, dbname='test.sqlite')\n DataBaseTest.db.create_db_tables()\n\n @classmethod\n def tearDownClass(cls):\n try:\n os.remove('test.sqlite')\n except:\n pass\n\n # Test insert data base\n def test_insert_database_correct(self):\n\n dict_address = {'address': 'Test', 'city': 'Test', 'postal_code': 111, 'country': 'Test'}\n\n person_to_test = Person(\"PersonTest\", \"SurnameTest\", 111, dict_address, 'test@test.com', 'www.test.es')\n\n result = DataBaseTest.db.person_insert(person_to_test)\n\n self.assertTrue(result)\n\n query_to_execute = '''SELECT Name, Surname FROM Person WHERE Name =\"{}\"'''.format(\n person_to_test.name)\n\n result = DataBaseTest.db.select_data(query_to_execute)\n\n first_result = result[0]\n self.assertEqual(first_result[0], \"PersonTest\")\n self.assertEqual(first_result[1], \"SurnameTest\")\n\n # Test insert data base\n\n def test_insert_database_incorrect(self):\n dict_address = {'address': 'Test', 'city': 'Test', 'postal_code': 111, 'country': 'Test'}\n\n person_to_test = Person(\"PersonTest\", \"SurnameTest\", \"a\", dict_address, 'test@test.com', 'www.test.es')\n\n result = DataBaseTest.db.person_insert(person_to_test)\n\n self.assertIsNone(result)\n\n # Test update data base\n def test_update_database_correct(self):\n\n dict_address = {'address': 'Amazonas 2', 'city': 'Alcorcon', 'postal_code': 28922, 'country': 'spain'}\n\n person_to_find = Person(\"PersonTest\", \"SurnameTest\", 111, dict_address, 'test@test.com', 'www.test.es')\n person = Person(\"NameTest\", \"SurnameTest\", 222, dict_address, 'test@test.com', 
'www.test.es')\n\n query_to_execute = '''SELECT Id, Name, Surname FROM Person WHERE Name =\"{}\"'''.format(\n person_to_find.name)\n\n result = DataBase.select_data(self.db, query_to_execute)\n\n record = result[0]\n person.identifier = record[0]\n\n result = DataBaseTest.db.person_update(person)\n\n self.assertTrue(result)\n\n query_to_execute = '''SELECT Name, Surname FROM Person WHERE Name =\"{}\"'''.format(person.name)\n\n result = DataBase.select_data(self.db, query_to_execute)\n\n first_result = result[0]\n self.assertEqual(first_result[0], \"NameTest\")\n self.assertEqual(first_result[1], \"SurnameTest\")\n\n","sub_path":"test/database_test.py","file_name":"database_test.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"561688825","text":"#from urllib2 import parse_http_list as _parse_list_header\nfrom flask import Flask\nfrom flask import request\nimport urllib\n\napp = Flask(__name__)\n\n\n#http://127.0.0.1:5000/showRandArticle?lang=tel\n@app.route('/showRandArticle', methods=['GET', 'POST'])\ndef getByLangAndRandId():\n\n language = request.args.get('lang')\n return 'Hello ' + language + ' usr'\n\nif __name__ == \"__main__\":\n app.run('0.0.0.0')\n\n","sub_path":"python_prgrams/flask.py","file_name":"flask.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"121457680","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd\nfrom random import randint\n\n# Initializing Lists\ngods = []\nlevels = []\nkdas = []\ngold = []\ngpms = []\ndamage_dealt = []\ndamage_taken = []\ndamage_mitigated = []\ndamage_inhand = []\nteam_healing = []\nself_healing = []\nstructure_damage = []\nwards = []\ndistance_traveled = []\ngod_name = ' '\ngpm_check = ' '\n\n# Monitor Scraping Efficiency\nstart_time = time.time()\nreq = 0\n\n# read in all match #'s for Incon matches\nmatch_number = pd.read_csv('smite_incon_matches.csv', dtype=str)\nmatch_number.columns = ['Match']\nmatchcol = match_number['Match']\nlist1 = pd.Series.tolist(matchcol)\n\nfor match in list1:\n r = requests.get('https://smite.guru/match/' + match)\n time.sleep(randint(1, 3))\n\n req += 1\n now = time.time()\n time_lapse = now - start_time\n print('Request #: {}; Frequency: {} requests per second'.format(req, req / time_lapse))\n\n soup = BeautifulSoup(r.text, 'html.parser')\n # Grabbing Information from the Match Stats Table\n if soup.find('section', attrs={'id': 'match-stats'}) is not None:\n matches = soup.find('section', attrs={'id': 'match-stats'})\n if matches.findAll('div', attrs={'class': 'row match-table__row'}) is not None:\n players = matches.findAll('div', attrs={'class': 'row match-table__row'})\n for i in players:\n if i.find('a').text == 'Incon':\n # Grabbing the name of the god\n god = i.div.div.div.text\n god_name = god\n gods.append(god)\n # Grab the level, K/D/A, Gold Per Minute, Damage Dealt, Damage Taken, Damage Mitigated\n first_table_info = i.findAll('div', attrs={'class': 'row__item'})\n level = first_table_info[0].text\n levels.append(level)\n\n kda = first_table_info[1].text\n kdas.append(kda)\n\n gold1 = first_table_info[2].text\n gold.append(gold1)\n\n gpm = first_table_info[3].text\n gpm_check = gpm\n gpms.append(gpm)\n\n dd = first_table_info[4].text\n damage_dealt.append(dd)\n\n dt = 
first_table_info[5].text\n damage_taken.append(dt)\n\n dm = first_table_info[6].text\n damage_mitigated.append(dm)\n else:\n level = 'NA'\n levels.append(level)\n\n kda = 'NA'\n kdas.append(kda)\n\n gold1 = 'NA'\n gold.append(gold1)\n\n gpm = 'NA'\n gpms.append(gpm)\n\n dd = 'NA'\n damage_dealt.append(dd)\n\n dt = 'NA'\n damage_taken.append(dt)\n\n dm = 'NA'\n damage_mitigated.append(dm)\n else:\n level = 'NA'\n levels.append(level)\n\n kda = 'NA'\n kdas.append(kda)\n\n gold1 = 'NA'\n gold.append(gold1)\n\n gpm = 'NA'\n gpms.append(gpm)\n\n dd = 'NA'\n damage_dealt.append(dd)\n\n dt = 'NA'\n damage_taken.append(dt)\n\n dm = 'NA'\n damage_mitigated.append(dm)\n\n if soup.findAll('div', attrs={'class': 'match-table'}) is not None:\n d_insights = soup.findAll('div', attrs={'class': 'match-table'})[3]\n if d_insights.findAll('div', attrs={'class': 'row match-table__row'}) is not None:\n players_damage = d_insights.findAll('div', attrs={'class': 'row match-table__row'})\n for i in players_damage:\n if i.find('a').text == 'Incon':\n second_table_info = i.findAll('div', attrs={'class': 'row__item'})\n\n # In Hand Damage\n ihd = second_table_info[2].text\n damage_inhand.append(ihd)\n\n # Team Healing\n th = second_table_info[3].text\n team_healing.append(th)\n\n # Self Healing\n sh = second_table_info[4].text\n self_healing.append(sh)\n\n # Structure Damage\n st = second_table_info[7].text\n structure_damage.append(st)\n else:\n # In Hand Damage\n ihd = 'NA'\n damage_inhand.append(ihd)\n\n # Team Healing\n th = 'NA'\n team_healing.append(th)\n\n # Self Healing\n sh = 'NA'\n self_healing.append(sh)\n\n # Structure Damage\n st = 'NA'\n structure_damage.append(st)\n else:\n # In Hand Damage\n ihd = 'NA'\n damage_inhand.append(ihd)\n\n # Team Healing\n th = 'NA'\n team_healing.append(th)\n\n # Self Healing\n sh = 'NA'\n self_healing.append(sh)\n\n # Structure Damage\n st = 'NA'\n structure_damage.append(st)\n\n if soup.findAll('div', attrs={'class': 'match-table'}) is not 
None:\n farm_insights = soup.findAll('div', attrs={'class': 'match-table'})[4]\n if farm_insights.findAll('div', attrs={'class': 'row match-table__row'}) is not None:\n player_farm = farm_insights.findAll('div', attrs={'class': 'row match-table__row'})\n for i in player_farm:\n if god_name == i.find('img')['alt'] \\\n and gpm_check == i.findAll('div', attrs={'class': 'row__item'})[3].text:\n if len(player_farm) >= 6:\n index = player_farm.index(i)\n third_table_info = player_farm[index].findAll('div', attrs={'class': 'row__item'})\n # Wards Placed\n ward = third_table_info[8].text\n wards.append(ward)\n\n # Distance Traveled\n dist = third_table_info[7].text\n distance_traveled.append(dist)\n else:\n ward = 'NA'\n wards.append(ward)\n\n dist = 'NA'\n distance_traveled.append(dist)\n else:\n ward = 'NA'\n wards.append(ward)\n\n dist = 'NA'\n distance_traveled.append(dist)\n else:\n ward = 'NA'\n wards.append(ward)\n\n dist = 'NA'\n distance_traveled.append(dist)\ndamage_data = pd.DataFrame({'God': gods, 'Level': levels, 'KDA': kdas, 'Gold Per Minute': gpms,\n 'Damage Dealt': damage_dealt, 'In Hand Damage Dealt': damage_inhand,\n 'Damage Taken': damage_taken, 'Damage Mitigated': damage_mitigated,\n 'Team Healing': team_healing, 'Self Healing': self_healing,\n 'Structure Damage': structure_damage, 'Wards': wards, 'Distance Traveled': distance_traveled\n })\ndamage_data.info()\ndamage_data.head(5)\n\n#Send to CSV\ndamage_data.to_csv('C:/Users/donav/Documents/Projects/Smite/Damage.csv')\n","sub_path":"Web Scraper/RealScraper2.py","file_name":"RealScraper2.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"34771077","text":"# https://www.tensorflow.org/get_started/mnist/beginners\r\n\r\nfrom PIL import Image\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\r\n\r\nimport tensorflow as tf\r\n\r\nx = tf.placeholder(tf.float32, [None, 784])\r\n\r\nW = tf.Variable(tf.zeros([784, 10])) # Weights\r\nb = tf.Variable(tf.zeros([10])) # Biases\r\ny = tf.nn.softmax(tf.matmul(x, W) + b) # the learning model\r\n\r\ny_ = tf.placeholder(tf.float32, [None, 10]) # base truth\r\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\r\n\r\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\r\n\r\nsess = tf.InteractiveSession()\r\ntf.global_variables_initializer().run()\r\n\r\nfor _ in range(3000):\r\n batch_xs, batch_ys = mnist.train.next_batch(100)\r\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\r\n\r\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n# read-out\r\nprint(sess.run(accuracy, feed_dict={x:mnist.test.images, y_:mnist.test.labels}))\r\n\r\n# Very basic visualisation of the learned weights\r\n# http://pillow.readthedocs.io/en/4.0.x/\r\nimage_node = 127 + (W * 100) # calculation node that gets our image into a 0..255 range centred on 127\r\nimg_size = (280,28) # MNIST image sizes\r\nimg = Image.frombuffer('F', img_size, sess.run(image_node)).convert('RGB')\r\nimg.save('./my.png')\r\n#img.show()\r\n","sub_path":"04_mnist_softmax.py","file_name":"04_mnist_softmax.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"542871086","text":"# 1. В диапазоне натуральных чисел от 2 до 99 определить, сколько из них кратны каждому из чисел в диапазоне от 2 до 9.\n# Примечание: 8 разных ответов.\n\n# init result list\nresult = [0 for _ in range(2, 10)]\n\nfor num in range(2, 100):\n for comp_num in range(2, 10):\n if num % comp_num == 0:\n result[comp_num - 2] += 1\n\nfor comp_num in range(2, 10):\n print(f\"Number {comp_num} - {result[comp_num - 2]}\")\n","sub_path":"lesson3/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"332057973","text":"from Course import *\nimport itertools\nfrom scrape_websoc import *\n\n\ndef permute_schedules(courses: {str: {Course: {str: [Course]}}}, only: {int} = None):\n '''\n input:\n ' ': {\n : {\n 'Dis': [],\n 'Lab': []\n }, {...}\n }\n '''\n d = []\n for x in courses.values():\n group = set()\n for lec, children in x.items():\n if not only or lec.code in only:\n incld_children = ((x for x in c if not only or x.code in only) for c in children.values())\n for i in itertools.product([lec], *incld_children):\n group.add(i)\n # print('----------')\n # for y in x:\n # print('{} {} {} ({})'.format(y.num, y.c_type, y.section, y.code))\n # print('----------')\n\n d.append(group)\n return exclude_conflicts(itertools.product(*d))\n\n\nhm = 0\n\n\ndef exclude_conflicts(cs: iter):\n for i in cs: # course group combo\n global hm\n hm += 1\n valid = True\n for x in itertools.combinations(i, 2): # pick 2 groups from combo\n if valid:\n for (a, b) in itertools.product(*x): # cartesian product\n if a.conflicts_with(b):\n valid = False\n break\n else:\n break\n if valid:\n yield i\n\n\nif __name__ == '__main__':\n courses = get_department('I&C Sci')\n d = parse_sections(courses)\n sub = {k: d[k] for k in ('I&C Sci 51', 'I&C Sci 53')}\n courses = get_department('CHINESE')\n d = parse_sections(courses)\n sub2 = {k: d[k] for k in ('Chinese 1B',)}\n sub.update(sub2)\n\n i = 0\n start = datetime.now()\n for x in permute_schedules(sub):\n i += 1\n print(['{} {} {} ({})'.format(y.num, y.c_type, y.section, y.code) for y in itertools.chain.from_iterable(x)])\n end = datetime.now()\n print('generated {}/{} schedules (discarded {} due to conflicts) in {}s'.format(i, hm, hm - i,\n (end - start).total_seconds()))\n","sub_path":"schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"27475990","text":"'''\nWrite a program that takes as input a set sorted sequences and computes the union of these\nsequences as a sorted sequences. For example, if the input is [3, 5, 7], [0, 6], [0, 6, 28]\nthen the output is: [0, 0, 3, 5, 6, 6, 7, 28]\n\nBrute-force approach is to concatenate these sequences into a single array and sort\nTime-complexity: O(nlogn)\n'''\n\n'''\nApproach: A min-heap is ideal for maintaining a collection of elements when we need to add\narbitrary values and extract the smallest element. As a concrete example, suppose there are \nthree sorted arrays to be merged: (3, 5, 7), (0, 6), and (0, 6, 28)\nThe min-heap is initialized to the first entry of each array, i.e., it is [0, 0, 3]. \nWe extract the smallest entry, 0, and add it to the output which is (0). Then we add 6 to the\nmin-heap which is {3,0,6} now (We chose the 0 entry corresponding to the third array \narbitrarily, it would be perfectly acceptable to choose from the second array.) \nNext, extract 0, and add it to the output which is [0,0]; then add 6 to the min-heap which\nis [3,6,6]. Next, extract 3, and add it to the output which is (0,0,3); then add 5 to the \nmin-heap which is [5,6,6]. Next, extract 5, and add it to the output which is [0,0,3,5]; \nthen add 7 to the min-heap which is [7,6,6]. Next, extract 6, and add it to the output which \nis [0,0,3,5,6]; assuming 5 is selected from the second array, which has no remaining elements, \nthe min-heap is [7,6]. Next, extract 6, and add it to the output which is (0,0,3,5,6,6); \nthen add 28 to the min-heap which is [7,28]. Next, extract 7, and add it to the output which\nis (0,0,3,5,6,6,7); the min-heap is {28}. Next, extract 28, and add it to the output which is\n[0,0,3,5,6,6,7,28]; now, all elements are processed and the output stores the sorted \nelements.\n\nlet k be the number of input sequences. Then there are no more than k elements in the min-heap.\nBoth extract-min and insert take O(logk) time. 
Hence, we can do the merge in O(nlogk) time.\nThe space complexity is O(k) beyond the space needed to write the final result. In particular,\nif the data comes from files and is written to a file, instead of arrays, we would need only\nO(k) additional storage\n'''\nimport heapq\ndef merge_sorted_arrays(sorted_arrays):\n min_heap = []\n # Builds a list of iterators for each array in sorted_arrays.\n sorted_arrays_iters = [iter(x) for x in sorted_arrays]\n\n # Puts first element from each iterator in min_heap. NOTE: enumerate returns a tuple\n for i, it in enumerate(sorted_arrays_iters):\n first_element = next(it, None)\n if first_element is not None:\n heapq.heappush(min_heap, (first_element, i))\n \n result = []\n while min_heap:\n smallest_entry, smallest_array_i = heapq.heappop(min_heap)\n smallest_array_iter = sorted_arrays_iters[smallest_array_i]\n result.append(smallest_entry)\n next_element = next(smallest_array_iter, None)\n if next_element is not None:\n heapq.heappush(min_heap, (next_element, smallest_array_i))\n\n return result\n\nsorted_arrays = [[3, 5, 7], [0, 6], [0, 6, 28]]\nresult = merge_sorted_arrays(sorted_arrays)\nprint(result)","sub_path":"Python/EPI/Heaps/merge_sorted_arrays.py","file_name":"merge_sorted_arrays.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"37618598","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .serializers import *\nfrom .models import *\n\n\nclass IndexView(APIView):\n \"\"\"\n View class for index page\n \"\"\"\n @staticmethod\n def get(request, format=None):\n \"\"\"\n GET method. Lists all created polls\n :param request: Http request\n :param format: Either json or html. Returns response as given format. Default is json.\n :return: JSON data of all polls.\n \"\"\"\n polls = PollModel.objects.all()\n serializer = PollListSerializer(polls, many=True)\n return Response(serializer.data)\n\n\nclass CreateView(APIView):\n \"\"\"\n View class for creating poll\n \"\"\"\n @staticmethod\n def post(request, format=None):\n \"\"\"\n POST method. Creates a new poll by given request. Returns http response as type application/json\n :param request: JSON containing poll title and selections\n :param format: Either json or html. Returns response as given format. Default is json.\n :return: http 201 if created successfully, 400 if creation failed.\n \"\"\"\n body = request.data\n\n try:\n poll = PollModel(title=body['title'])\n poll.save()\n except KeyError:\n response = dict(error='KeyError')\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n response = dict(poll=body['title'], selections=list(), count=0)\n\n try:\n for choice in body['choices']:\n selection = SelectionModel(poll=poll, body=choice)\n selection.save()\n response['selections'].append(choice)\n response['count'] += 1\n return Response(response, status=status.HTTP_201_CREATED)\n\n except KeyError:\n response = dict(error='KeyError')\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UpdateView(APIView):\n \"\"\"\n View class for updating poll\n \"\"\"\n @staticmethod\n def post(request, format=None):\n \"\"\"\n POST method. 
Updates poll status and returns http response as type application/json\n :param request: JSON data, including poll title and to-be-modified selections\n :param format: Either json or html. Returns response as given format. Default is json.\n :return: Http 202 if valid, Http 400 if invalid request.\n \"\"\"\n body = request.data\n try:\n poll = PollModel.objects.get(title=body['poll'])\n except PollModel.DoesNotExist:\n response = dict(error='Poll does not exist')\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n response = dict(poll=body['poll'], updated=list())\n\n try:\n for choice in body['choices']:\n selection = SelectionModel.objects.filter(poll=poll).filter(body=choice['name']).get()\n if choice['selected'] is True:\n selection.num_people += 1\n response['updated'].append({'name': choice['name'], 'update': 'increased'})\n elif selection.num_people > 0:\n selection.num_people -= 1\n response['updated'].append({'name': choice['name'], 'update': 'decreased'})\n selection.save()\n return Response(response, status=status.HTTP_202_ACCEPTED)\n\n except SelectionModel.DoesNotExist:\n response = dict(error='Selection does not exist')\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ResultView(APIView):\n \"\"\"\n View class for returning poll result\n \"\"\"\n @staticmethod\n def get(request, format=None):\n \"\"\"\n Returns result of all poll\n :param request: http request\n :param format: Either json or html. Returns response as given format. Default is json.\n :return: JSON data, containing result of all polls\n \"\"\"\n polls = PollModel.objects.all()\n serializer = PollSerializer(polls, many=True)\n return Response(serializer.data)\n\n @staticmethod\n def post(request, format=None):\n \"\"\"\n Returns result of given poll title\n :param request: JSON data, including poll title\n :param format: Either json or html. Returns response as given format. 
Default is json.\n :return: JSON data, containing result of given poll\n \"\"\"\n print(request.data)\n try:\n poll = PollModel.objects.get(title=request.data['title'])\n serializer = PollSerializer(instance=poll)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n print(e)\n serializer = PollSerializer(data=request.data)\n serializer.is_valid()\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"api/poll/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"433325715","text":"from django.shortcuts import render\nfrom django.contrib import messages\nfrom .models import Plant, PlantScrap\n\nfrom django.views.generic import ListView\nfrom django.db.models import Q\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http.response import JsonResponse\nimport json\n\n\nclass PlantListView(ListView):\n model = Plant\n paginate_by = 6\n template_name = 'search/main_plant.html'\n context_object_name = 'plant_list'\n\n def get_queryset(self):\n plant_list = Plant.objects.order_by('name')\n return plant_list\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['plants'] = Plant.objects.all()\n paginator = context['paginator']\n page_numbers_range = 5\n max_index = len(paginator.page_range)\n\n page = self.request.GET.get('page')\n current_page = int(page) if page else 1\n\n start_index = int((current_page - 1) /\n page_numbers_range) * page_numbers_range\n end_index = start_index + page_numbers_range\n if end_index >= max_index:\n end_index = max_index\n\n page_range = paginator.page_range[start_index:end_index]\n context['page_range'] = page_range\n\n search_keyword = self.request.GET.get('q', '')\n search_type = self.request.GET.get('type', '')\n scrap_plant_list = []\n if self.request.user.is_authenticated:\n user = self.request.user\n scrap_list = PlantScrap.objects.filter(user=user)\n for scrap in scrap_list:\n plant = Plant.objects.get(pk=scrap.plant.pk)\n scrap_plant_list.append(plant)\n\n else:\n scrap_plant_list = []\n\n context['scrap_plant_list'] = scrap_plant_list\n if len(search_keyword) > 1:\n context['q'] = search_keyword\n context['type'] = search_type\n\n return context\n\n def get_queryset(self):\n search_keyword = self.request.GET.get('q', '')\n search_type = self.request.GET.get('type', '')\n plant_list = Plant.objects.order_by('name')\n\n if search_keyword:\n if len(search_keyword) > 1:\n if search_type == 'all':\n search_plant_list = 
plant_list.filter(\n Q(name__icontains=search_keyword))\n elif search_type == 'name':\n search_plant_list = plant_list.filter(\n Q(name__icontains=search_keyword))\n elif search_type == 'content':\n search_plant_list = plant_list.filter(\n Q(content__icontains=search_keyword))\n elif search_type == 'managelevel':\n search_plant_list = plant_list.filter(\n Q(management_level__icontains=search_keyword))\n return search_plant_list\n else:\n messages.error(self.request, '검색어는 2글자 이상 입력해주세요.')\n return plant_list\n\n\ndef main_plant(request):\n return render(request, 'search/main_plant.html')\n\n\ndef plant_detail(request, pk):\n plant = Plant.objects.get(pk=pk)\n ctx = {\n \"plant\": plant\n }\n return render(request, \"search/plant_detail.html\", ctx)\n\n\n@csrf_exempt\ndef scrap_ajax(request):\n req = json.loads(request.body)\n plant_id = req['Id']\n plant = Plant.objects.get(pk=plant_id)\n user = request.user\n\n if PlantScrap.objects.filter(user=user).filter(plant=plant):\n scrap = PlantScrap.objects.filter(user=user).get(plant=plant)\n scrap.delete()\n button_type = 'del_scrap'\n else:\n scrap = PlantScrap(user=user, plant=plant)\n scrap.save()\n button_type = 'scrap'\n print(button_type)\n return JsonResponse({'id': plant.pk, 'type': button_type})\n","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"436070063","text":"import numpy as np\nfrom utils.models.vae import VAE\nfrom utils.models.rnn import RNN\nfrom pathlib import Path\nimport cv2\nfrom utils import utils, model_config\n\nvideo_frame_folder_path = Path(\"../data/my_video/frames\")\n\nframe_paths = np.sort(list(video_frame_folder_path.glob(\"*.jpg\")))\nframes = np.array([utils.frame_preprocessor(cv2.imread(str(p))) for p in frame_paths])\n\n# Train VAE\nvae = VAE.init_default()\nvae.train(frames, 25, include_callbacks=False)\n\n# Create data for RNN\nencoded_images = vae.encoder.predict(frames)\ndecoded_images = (vae.decoder.predict(encoded_images) * 255).astype(np.uint8)\nx_rnn_data, y_rnn_data = utils.create_rnn_data(encoded_images, model_config.GRU_TIME_STEPS)\n\n# Train RNN\nrnn = RNN.init_default()\nrnn.train(x_rnn_data, y_rnn_data, 100, include_callbacks=False)\n\n# Generate frames\nn_images_to_generate = 100\nstarter_frames = frames[0:model_config.GRU_TIME_STEPS]\nstarter_frames = np.array([utils.frame_preprocessor(x) for x in starter_frames])\ngenerated_encoded_frames = vae.encoder.predict(starter_frames)\n\nfor i in range(n_images_to_generate):\n next_frame = rnn.model.predict(np.expand_dims(generated_encoded_frames[i:i + model_config.GRU_TIME_STEPS], axis=0))\n generated_encoded_frames = np.vstack((generated_encoded_frames, next_frame))\n\n# Remove the manually created \"starter\" frames\ngenerated_encoded_frames = generated_encoded_frames[model_config.GRU_TIME_STEPS:, :]\n\n# Decode the predicted images\ngenerated_decoded_frames = vae.decoder.predict(generated_encoded_frames)\ngenerated_decoded_frames = (generated_decoded_frames * 255).astype(np.uint8)\n\n# Save the generated frames\ngenerated_frames_folder = Path(\"./generated_frames\")\ngenerated_frames_folder.mkdir(exist_ok=True)\n\nfor i, generated_frame in enumerate(generated_decoded_frames):\n image_name = \"{0}.jpg\".format(str(i).ljust(6, \"0\"))\n cv2.imwrite(image_name, 
generated_frame)\n","sub_path":"video_generation.py","file_name":"video_generation.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"496705608","text":"\"\"\"Code to output web source code.\n\"\"\"\nimport os\nfrom subprocess import call\nimport json\nimport shutil\nfrom termcolor import cprint\nfrom .utils import WorkingDirectory\n\nclass Web(object):\n \"\"\"Class to handle web source code.\n \"\"\"\n def __init__(self, project):\n self.project = project\n\n def initialise(self):\n \"\"\"Initialise web project.\n \"\"\"\n cprint('Creating react web project if it doesn\\'t exist', color='yellow')\n fresh_project = False\n if os.path.exists('web'):\n with WorkingDirectory('web'):\n call(['yarn', 'install'])\n else:\n call(['create-react-app', self.project.project_name_lowercased])\n call(['mv', self.project.project_name_lowercased, 'web'])\n fresh_project = True\n with WorkingDirectory('web'):\n if self.project.run_yarn or fresh_project:\n cprint('Running yarn add for additional dependencies')\n packages_to_add = [\"aphrodite\", \"concurrently\", \"enzyme\", \"react-dom\", \"rimraf\",\n \"react-redux\", \"react-router-dom\", \"@types/react\", \"redux-logger\", \"redux-thunk\", \"redux\"]\n\n dev_packages_to_add = [\"react-addons-test-utils\", \"react-scripts-ts\", \"ts-jest\", \"ts-node\", \"typescript\"]\n call(['yarn', 'add'] + packages_to_add)\n call(['yarn', 'add', '--dev'] + dev_packages_to_add)\n core_script = \"mkdir -p node_modules\" + os.sep + self.project.project_name_lowercased + \"core \" + \\\n \"&& cp ../core/package.json node_modules\" + os.sep + self.project.project_name_lowercased + \"core\" + os.sep + \"package.json \" + \\\n \"&& cp -r ../core/dist node_modules\" + os.sep + self.project.project_name_lowercased + \"core/dist\"\n cprint('Updating package.json scripts', color='yellow')\n scripts = {\n \"link:core\": core_script,\n \"test\": \"jest\",\n \"tsc\": \"tsc\",\n \"clean\": \"rimraf artifacts\",\n \"build\": \"yarn run clean && yarn run link:core && yarn run tsc --\",\n \"watch\": \"yarn run build -- -w\",\n \"start\": \"yarn run link:core && PORT=3002 
react-scripts-ts start\",\n }\n package_json = json.loads(open('web' + os.sep + 'package.json', 'r').read())\n package_json['scripts'] = scripts\n package_json_file = open('web' + os.sep + 'package.json', 'w')\n package_json_file.write(json.dumps(package_json, sort_keys=True, indent=4, separators=(',', ': ')))\n\n if os.path.exists('web' + os.sep + 'src'):\n shutil.rmtree('web' + os.sep + 'src')\n os.mkdir('web' + os.sep + 'src')\n\n def output(self):\n \"\"\"Output web source code.\n \"\"\"\n cprint('\\tSetting up typescript code', color='yellow')\n self.project.write_asset_template('web/src', 'index.tsx.j2', 'index.tsx')\n self.project.write_asset_template('web/src/containers', 'app.tsx.j2', 'app.tsx')\n self.project.write_asset_template('web/src/containers', 'login.tsx.j2', 'login.tsx')\n","sub_path":"react_bootstrap/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"18709920","text":"import math\nimport random\n\n\ndef play_guess(x, y):\n num_list = []\n\n for i in range(10):\n num_list.append(random.randint(x, y))\n\n while True:\n tip = int(input(\"Enter an integer from {0} to {1}: \".format(x, y)))\n\n if tip < num_list[i]:\n print(\"Guess is low.\")\n elif tip > num_list[i]:\n print(\"Guess is high.\")\n else:\n break\n\n print(\"You guessed it!\")\n\n\nplay_guess(1, 99)\n\nplay_guess(1, 49)\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"569024897","text":"\"\"\"\nAuthor: Le Tuan Luc\nDate: 2021/07/19\nProgram: exercise_02_page_72.py\nProblem:\n Write a code segment that displays the values of the integers x, y, and z on a single line, such that each value is right-justified with a field width of 6.\nSolution:\n print(\"%6s\" % )\n >>>\n\"\"\"\nx = 123\ny = 43\nz = 56789\nprint(\"|%6s\" % x, \"|%6s\" % y, \"|%6s\" % z)","sub_path":"chapter03/page_72/exercise_02_page_72.py","file_name":"exercise_02_page_72.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"71783719","text":"\"\"\"充值接口测试\"\"\"\nimport json\nimport os\nimport decimal\nfrom decimal import Decimal\n\nimport pytest\nimport requests\nfrom common.excel_handler import ExcelHandler\nfrom config.path import data_path\nfrom common.yaml_handler import yaml_config,user_config\nfrom common.logger_hander import logger\nfrom common.helper import generate_new_phone\nfrom common.db_handler import DBHandler\n\nexcel_file = os.path.join(data_path,'demo.xlsx')\ndata = ExcelHandler(excel_file).read_dict('recharge')\nprint(data)\n\n#db.db_colse()\n@pytest.mark.parametrize('info',data)\ndef test_recharge(info,login):\n \"\"\"充值\"\"\"\n \"\"\"先要替换\"\"\"\n if \"#member_id#\" in info['json']:\n info[\"json\"] = info[\"json\"].replace('#member_id#',str(login['id']))\n if \"#wrong_member_id#\" in info['json']:\n info[\"json\"] = info[\"json\"].replace('#wrong_member_id#', str(login['id'] + 1))\n\n # # token组装方式1:通过excel替换\n # if \"#token#\" in info['headers']:\n # info[\"headers\"] = info[\"headers\"].replace(\"#token#\",login['token'])\n\n # token 组装2:通过headers 添加,excel 表格里面不需要Authorization\n headers = json.loads(info[\"headers\"])\n headers['Authorization'] = login['token']\n\n # 数据库访问,充值之前的余额\n db = DBHandler()\n sql = 'select leave_amount from member where id={}'.format(login['id'])\n result = db.query(sql)\n before_recharge_money =result['leave_amount']\n db.db_colse()\n\n data = json.loads(info['json'])\n res= requests.request(url= yaml_config['host'] + info['url'],\n method=info['method'],\n headers=headers,\n json= data)\n res_body = res.json()\n print(res_body)\n try:\n assert res_body['code'] == info[\"expected\"]\n except AssertionError as e:\n logger.error(\"用例失败:{}\".format(e))\n raise e\n finally:\n excel = ExcelHandler(excel_file)\n excel.write('recharge',str(res_body),row=int(info['case_id']+1),column=9)\n if res_body['code'] == 0:\n db = DBHandler()\n sql = 'select leave_amount from member where id={}'.format(login['id'])\n result = db.query(sql)\n 
after_recharge_money = result['leave_amount']\n db.db_colse()\n money = Decimal(str(data['amount']))\n assert before_recharge_money + money == after_recharge_money\n if res_body['code'] == info[\"expected\"]:\n excel.write('recharge',True,row=int(info['case_id']+1),column=8)\n else:\n excel.write('recharge',False,row=int(info['case_id']+1),column=8)\n","sub_path":"lesson27_api_v6/test/test_recharge_db.py","file_name":"test_recharge_db.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"163558698","text":"#!/usr/bin/env python3\n'''\nThis program uses naive Bayes to guess the language of a text.\n'''\n\nimport argparse\nimport operator\nimport functools\nimport csv\nfrom decimal import Decimal, getcontext\n\n# Increase the precision of out calculations. Too much small numbers.\ngetcontext().prec = 100\n\n# Some constants\nPI = Decimal('3.141592653589793238462643383')\nLETTER_PROBABILITIES_FILE = 'letter_probabilities.csv'\nLANGUAGE_PROBABILITIES_FILE = 'language_probabilities.csv'\n\n# Probability of the language and the letter in language from A to Z.\n# Example:\n# 'English': {\n# 'probability': 0.25,\n# 'A': { 'mean': 0.12345, 'variance': 0.0000001 }\n# 'B'...\n# }\nLETTER_FREQUENCY_IN_LANGUAGES = {}\n\ndef main(files_to_analyze, language_file, letter_file):\n '''Main function that analyzes the text and produce the output'''\n load_data_from_disk(language_file, letter_file)\n for file_to_analyze in files_to_analyze:\n letter_frequency = letter_frequency_from_file(file_to_analyze)\n posterior = calculate_posterior(letter_frequency)\n print(language_with_highest_posterior(posterior))\n\ndef load_data_from_disk(language_file, letter_file):\n '''Load the data from the csv files with the probabilities.'''\n with open(language_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n LETTER_FREQUENCY_IN_LANGUAGES[row['Language']] = \\\n {'probability': Decimal(row['Probability'])}\n\n with open(letter_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n LETTER_FREQUENCY_IN_LANGUAGES[row['Language']][row['Letter']] = {\n 'mean': Decimal(row['Probability']),\n 'variance': Decimal(row['Variance'])}\n\ndef letter_frequency_from_file(file_to_analyze):\n '''Calculate the frequency of each letter in the file and return\n them as a dictionary, having each letter as the key and the\n frequency as the value'''\n letters = count_letters_from_file(file_to_analyze)\n total_letters = sum(letters.values())\n 
return {char: count/total_letters for char, count in letters.items()}\n\ndef count_letters_from_file(file_to_analyze):\n '''Count the valid letters in a file'''\n letters = {} # Frequency of each letter\n # Let's read the file and count the letters\n with open(file_to_analyze,'r') as f:\n for char in iter(lambda: f.read(1).upper(), ''):\n if is_valid_character(char):\n letters[char] = letters.get(char, 0) + 1\n return letters\n\ndef is_valid_character(char):\n '''Check if the character is a valid character of analysis'''\n return len(char) == 1 and \\\n (ord('A') <= ord(char) <= ord('Z'))\n\ndef calculate_posterior(letter_frequency):\n posterior = {}\n for lang in LETTER_FREQUENCY_IN_LANGUAGES.keys():\n probs = [probability_of_letter(frequency, letter, lang) \\\n for letter, frequency in letter_frequency.items()]\n probs.append(probability_of_language(lang))\n posterior[lang] = functools.reduce(operator.mul, probs)\n return posterior\n\ndef probability_of_language(language):\n '''Probability of each language occur'''\n return LETTER_FREQUENCY_IN_LANGUAGES[language]['probability']\n\ndef mean_probability_of_letter(letter, language):\n '''Return the mean of the probability of a letter of a given language'''\n # FIXME the division by 100 is there because values are fixed in %\n # Must create a training program to generate those values and normalize between 0..1\n return LETTER_FREQUENCY_IN_LANGUAGES[language][letter]['mean']\n\ndef variance(letter, language):\n '''Return the variance for a letter in a given language'''\n return LETTER_FREQUENCY_IN_LANGUAGES[language][letter]['variance']\n\ndef probability_of_letter(frequency, letter, language):\n '''Calculate the probability of a given letter belongs to the language'''\n mean = mean_probability_of_letter(letter, language)\n var = variance(letter, language)\n return Decimal(-((Decimal(frequency) - mean)**2/(2*var))).exp()/Decimal(2*PI*var).sqrt()\n\ndef language_with_highest_posterior(posterior):\n values = 
list(posterior.values())\n keys = list(posterior.keys())\n return keys[values.index(max(values))]\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='This program uses \\\n naive Bayes to find out the language of a text.')\n parser.add_argument(\n 'file',\n nargs='+',\n help='File to be analyzed.'\n )\n parser.add_argument(\n '--letter',\n default='letter_probabilities.csv',\n help='CSV file with the probabilities of each letter in each language'\n )\n parser.add_argument(\n '--language',\n default='language_probabilities.csv',\n help='CSV file with the probabilities of each language'\n )\n args = parser.parse_args()\n main(args.file, args.language, args.letter)\n","sub_path":"naive_lang_classifier.py","file_name":"naive_lang_classifier.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"180820477","text":"from tkinter import filedialog as fd\nfrom tkinter.messagebox import showinfo\nfrom tkinter import *\n\n\ndef selectFile(action, target):\n \"\"\"Function used to open the csv file to read to file name/ directory\n to used with different action to update the target folder\n\n Args:\n action (string): the action to perform with file\n target (variable): the target to update with the result of action regarding the file\n \"\"\"\n filetypes = (\n ('csv', '*.csv'),\n )\n filename = fd.askopenfilename(\n title='Select csv datasource',\n initialdir='./data-sample',\n filetypes=filetypes)\n\n if action == \"upload\":\n target.insert(END, filename)\n","sub_path":"controller/menu/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"105611339","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Country',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(verbose_name='国名', max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Team',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(verbose_name='チーム名', max_length=100)),\n ('country', models.ForeignKey(verbose_name='所属国', to='raul.Country')),\n ],\n ),\n migrations.CreateModel(\n name='TeamUser',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('position_type', models.CharField(verbose_name='ポジション', choices=[('0', 'FW'), ('1', 'MF'), ('2', 'DF'), ('3', 'GK'), ('9', '監督')], max_length=1)),\n ('belong_to', models.ForeignKey(verbose_name='所属チーム', to='raul.Team')),\n ('user', models.ForeignKey(verbose_name='ユーザ', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"raul/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"368027551","text":"import json, sys\n\ndef load_token(path):\n try:\n with open(path) as file:\n token = json.load(file)\n return token['token']\n except Exception as e:\n print('[ERR] config.load_token: {0}'.format(e))\n sys.exit(1)\n","sub_path":"dist-packages/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"265361138","text":"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport paddle.fluid.core as core\nfrom op_test import OpTest\n\n\ndef rpn_target_assign(iou, rpn_batch_size_per_im, rpn_positive_overlap,\n rpn_negative_overlap, fg_fraction):\n iou = np.transpose(iou)\n anchor_to_gt_max = iou.max(axis=1)\n gt_to_anchor_argmax = iou.argmax(axis=0)\n gt_to_anchor_max = iou[gt_to_anchor_argmax, np.arange(iou.shape[1])]\n anchors_with_max_overlap = np.where(iou == gt_to_anchor_max)[0]\n\n tgt_lbl = np.ones((iou.shape[0], ), dtype=np.int32) * -1\n tgt_lbl[anchors_with_max_overlap] = 1\n tgt_lbl[anchor_to_gt_max >= rpn_positive_overlap] = 1\n\n num_fg = int(fg_fraction * rpn_batch_size_per_im)\n fg_inds = np.where(tgt_lbl == 1)[0]\n if len(fg_inds) > num_fg:\n disable_inds = np.random.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n tgt_lbl[disable_inds] = -1\n fg_inds = np.where(tgt_lbl == 1)[0]\n\n num_bg = rpn_batch_size_per_im - np.sum(tgt_lbl == 1)\n bg_inds = np.where(anchor_to_gt_max < rpn_negative_overlap)[0]\n if len(bg_inds) > num_bg:\n enable_inds = bg_inds[np.random.randint(len(bg_inds), size=num_bg)]\n tgt_lbl[enable_inds] = 0\n bg_inds = np.where(tgt_lbl == 0)[0]\n\n loc_index = fg_inds\n score_index = np.hstack((fg_inds, bg_inds))\n tgt_lbl = np.expand_dims(tgt_lbl, axis=1)\n return 
loc_index, score_index, tgt_lbl\n\n\nclass TestRpnTargetAssignOp(OpTest):\n def setUp(self):\n iou = np.random.random((10, 8)).astype(\"float32\")\n self.op_type = \"rpn_target_assign\"\n self.inputs = {'DistMat': iou}\n self.attrs = {\n 'rpn_batch_size_per_im': 256,\n 'rpn_positive_overlap': 0.95,\n 'rpn_negative_overlap': 0.3,\n 'fg_fraction': 0.25,\n 'fix_seed': True\n }\n loc_index, score_index, tgt_lbl = rpn_target_assign(iou, 256, 0.95, 0.3,\n 0.25)\n self.outputs = {\n 'LocationIndex': loc_index,\n 'ScoreIndex': score_index,\n 'TargetLabel': tgt_lbl,\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestRpnTargetAssignOp2(OpTest):\n def setUp(self):\n iou = np.random.random((10, 20)).astype(\"float32\")\n self.op_type = \"rpn_target_assign\"\n self.inputs = {'DistMat': iou}\n self.attrs = {\n 'rpn_batch_size_per_im': 128,\n 'rpn_positive_overlap': 0.5,\n 'rpn_negative_overlap': 0.5,\n 'fg_fraction': 0.5,\n 'fix_seed': True\n }\n loc_index, score_index, tgt_lbl = rpn_target_assign(iou, 128, 0.5, 0.5,\n 0.5)\n self.outputs = {\n 'LocationIndex': loc_index,\n 'ScoreIndex': score_index,\n 'TargetLabel': tgt_lbl,\n }\n\n def test_check_output(self):\n self.check_output()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py","file_name":"test_rpn_target_assign_op.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"47381015","text":"\n# coding: utf-8\n\n# In[51]:\n\n\nimport cv2\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport os\nimport pickle\nimport numpy.linalg as linalg\nfrom sklearn.model_selection import train_test_split\nfrom random import shuffle\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.naive_bayes import GaussianNB\nimport seaborn\nfrom numpy.random import choice\nfrom sklearn.tree import DecisionTreeClassifier\nimport math\nfrom sklearn import svm\nimport seaborn as sns\nimport matplotlib.patheffects as PathEffects\nimport torchvision.models as models\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.utils.data as utils\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom PIL import Image\nimport matplotlib.cm as cm\nimport imghdr\nimport tensorflow as tf\n\n\n# In[52]:\n\n\nfrom sklearn.svm import SVC\n\n\n# In[53]:\n\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nsess = tf.Session(config=config)\n\n\n# In[54]:\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# In[55]:\n\n\ndef mean_cal(data):\n main_mean=[]\n for i in range(len(data[0])):\n temp=[]\n for j in range(len(data)):\n temp.append(data[j][i])\n mean1=np.mean(np.array(temp))\n main_mean.append(mean1)\n return main_mean\n \n\n\n# In[56]:\n\n\ndef cal_eigenenergy(arr,k):\n sum=0\n for i in range(len(arr)):\n sum+=arr[i]\n initial=arr[0]\n counter=0\n while initial<(sum*k/float(100)):\n counter+=1\n initial+=arr[counter]\n return counter+1\n \n\n\n# In[57]:\n\n\ndef pca(data,k): # k is eigen energy\n data=np.array(data)\n mean=np.array(mean_cal(data))\n normal_data=[]\n for i in range(len(data)):\n normal_data.append(np.subtract(data[i],mean))\n normal_data=np.array(normal_data)\n cov_mat=np.cov(np.transpose(normal_data))\n eig_val, eig_vect = linalg.eigh(cov_mat)\n eig_vect=np.transpose(eig_vect)\n# 
eig_val=abs(eig_val)\n eig_val1=copy.deepcopy(eig_val)\n eig_vect1=copy.deepcopy(eig_vect)\n for i in range(len(eig_val)):\n if eig_val[i]<0:\n eig_val1[i]=eig_val[i]*(-1)\n \n eig_vect_set=sort_list(eig_vect1.tolist(),eig_val1.tolist())\n eig_vect_set.reverse()\n \n eig_val2=sorted(eig_val1, reverse=True)\n k=cal_eigenenergy(eig_val2,k)\n# print(k)\n eig_vect_set=eig_vect_set[:k]\n# print(\"Eig Vl:\",eig_val)\n# print(\"Eig Vect : \",eig_vect_set)\n \n# print(eig_vect_set)\n print(\"Features it took are : \",k)\n eig_vect=np.transpose(eig_vect_set)\n# dot_result=np.dot(normal_data,eig_vect)\n return eig_vect\n\n\n# In[58]:\n\n\n#geeks for geeks \ndef sort_list(list1, list2): \n \n zipped_pairs = zip(list2, list1) \n \n z = [x for _, x in sorted(zipped_pairs)] \n \n return z \n\n\n# In[59]:\n\n\ndef labelling(predict,true):\n h=[]\n for i in range(len(predict)):\n if predict[i]==true[i]:\n h.append(1)\n else:\n h.append(0)\n return h\n\n\n# In[60]:\n\n\ndef accuracy(predict,true):\n count=0\n for i in range(len(predict)):\n if predict[i]==true[i]:\n count+=1\n return count/float(len(predict))\n\n\n# In[61]:\n\n\ndef find_tpr_fpr(predict,real,checker):\n tp=0\n tn=0\n fp=0\n# print(\"find_tpr_fpr\")\n fn=0\n voc=copy.deepcopy([0,1,2,3,4,5,6,7,8,9])\n v=voc.index(checker)\n del voc[v]\n for i in range(len(predict)):\n if predict[i]==checker and real[i]==checker:\n tp=tp+1\n if (predict[i] in voc ) and real[i]==checker:\n fn=fn+1\n if predict[i]==checker and (real[i] in voc):\n fp=fp+1\n if (predict[i] in voc) and (real[i] in voc):\n tn=tn+1\n tpr2=0\n fpr2=0\n# print(\"Total :\",(tp+fp+tn+fn))\n tpr2=float(tp/float(tp+fn)) \n fpr2=float(fp/float(fp+tn))\n \n return tpr2,fpr2\n\n\n# In[62]:\n\n\ndef adaboost(n,train_data1,train_label1,test_data,test_label,weights,d):\n \n alpha_k=[]\n Ck=[]\n nat=[i for i in range(len(train_data1))]\n main_data=copy.deepcopy(train_data1)\n main_label=copy.deepcopy(train_label1)\n \n for i in range(n):\n print(i)\n# print(\"Hello : 
\",i)\n sample = choice(nat, d,p=weights,replace=False)\n# sample = choice(nat,d,weights,replace=False)\n train_data=[]\n train_label=[]\n for j in range(len(sample)):\n train_data.append(train_data1[sample[j]])\n train_label.append(train_label1[sample[j]])\n \n clf=DecisionTreeClassifier(max_depth=3,max_leaf_nodes=10)\n clf.fit(np.array(train_data),np.array(train_label))\n predict1=clf.predict(np.array(main_data))\n h=labelling(predict1.tolist(),main_label)\n train_err=clf.score(np.array(train_data),np.array(train_label))\n train_err=1-train_err\n alpha=0.5*np.log((1-train_err)/float(train_err))+np.log(25)\n alpha_k.append(alpha)\n Ck.append(clf)\n# print(\"Hello1 : \",i)\n for j in range(len(weights)):\n \n if h[j]==1:\n \n weights[j]=weights[j]*math.exp((-1)*alpha)\n else:\n weights[j]=weights[j]*math.exp(alpha)\n w=copy.deepcopy(weights)\n total=np.sum(w)\n for j in range(len(weights)):\n weights[j]=weights[j]/float(total)\n# print(\"Hello2 : \",i)\n #For test set\n test_predict=[]\n for i in range(len(test_data)):\n disc_func=[[] for i in class_label]\n for j in range(k_max):\n index=Ck[j].predict(np.array(test_data[i]).reshape(1,-1)).tolist()[0]\n if disc_func[index]==[]:\n disc_func[index].append(alpha_k[j])\n else:\n disc_func[index][0]+=alpha_k[j]\n test_predict.append(disc_func.index(max(disc_func)))\n\n test_acc=accuracy(test_predict,test_label) \n# For train set\n train_predict=[]\n# for i in range(len(train_data1)):\n# disc_func1=[[] for i in class_label]\n# for j in range(k_max):\n# index=Ck[j].predict(np.array(train_data1[i]).reshape(1,-1)).tolist()[0]\n# if disc_func1[index]==[]:\n# disc_func1[index].append(alpha_k[j])\n# else:\n# disc_func1[index][0]+=alpha_k[j]\n# train_predict.append(disc_func1.index(max(disc_func1)))\n\n# train_acc=accuracy(train_predict,train_label1) \n train_acc=0\n return Ck,alpha_k,train_predict,test_predict,train_acc,test_acc\n \n\n\n# In[63]:\n\n\ndef bagging(n,train_data1,train_label1,test_data,test_label,d):\n \n 
alpha_k=[]\n Ck=[]\n nat=[i for i in range(len(train_data1))]\n\n for i in range(n):\n sample = choice(nat, d,replace=True)\n train_data=[]\n train_label=[]\n for j in range(len(sample)):\n train_data.append(train_data1[sample[j]])\n train_label.append(train_label1[sample[j]])\n\n clf=DecisionTreeClassifier(max_depth=2,max_leaf_nodes=5)\n clf.fit(np.array(train_data),np.array(train_label))\n Ck.append(clf)\n #For test set\n test_predict=[]\n\n for i in range(len(test_data)):\n disc_func=[[] for i in class_label]\n for j in range(k_max):\n index=Ck[j].predict(np.array(test_data[i]).reshape(1,-1)).tolist()[0]\n if disc_func[index]==[]:\n disc_func[index].append(1)\n else:\n disc_func[index][0]+=1\n test_predict.append(disc_func.index(max(disc_func)))\n\n test_acc=accuracy(test_predict,test_label) \n train_acc=0\n# print(\"Accuracy in test data : \",acc)\n# #For training set\n train_predict=[]\n\n# for i in range(len(train_data1)):\n# disc_func=[[] for i in class_label]\n# for j in range(k_max):\n# index=Ck[j].predict(np.array(train_data1[i]).reshape(1,-1)).tolist()[0]\n# if disc_func[index]==[]:\n# disc_func[index].append(1)\n# else:\n# disc_func[index][0]+=1\n# train_predict.append(disc_func.index(max(disc_func)))\n\n# train_acc=accuracy(train_predict,train_label1) \n \n\n return Ck,train_predict,test_predict,train_acc,test_acc\n \n\n\n# In[64]:\n\n\ndef roc_design(prob_dist,testdata,checker):\n aux1=[]\n aux2=[]\n testdata1=copy.deepcopy(testdata)\n for i in range(len(testdata)):\n \n aux1.append(prob_dist[i])\n aux2.append(testdata[i])\n main1=sort_list(aux2, aux1)\n# print(\"Probability in incresing order : \",main1)\n \n tpr=[]\n fpr=[]\n #aux1 has prob_distribution and main1 has testlabel in sorted order\n \n main2=[]\n j=0\n for j in range(len(prob_dist)):\n main2.append(checker)\n i=0\n #Logic \n if (checker+1)==10:\n flag=checker-1\n else:\n flag=checker+1\n \n while i None:\n super(JobOutput, self).__init__(**kwargs)\n self.error = None\n self.state = 
None\n self.progress = None\n self.odatatype = None\n","sub_path":"azure-mgmt-media/azure/mgmt/media/models/job_output_py3.py","file_name":"job_output_py3.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"584645283","text":"from abstract_module import abstract\nfrom abstract_module.constants import *\nfrom math import cos, pi, sin, sqrt, atan2\nimport math_utils\nimport numpy as np\nfrom typing import List, Tuple\n\ndef closestObstacle(x: int, y: int, obstacles: List[Tuple[int, int]]) -> Tuple[int, int]:\n '''\n Given an obstacles list, return the obstacle closest to the robot\n '''\n last_ro = 0\n count = 0\n for obstacle in obstacles:\n obs_x, obs_y = obstacle\n delta_x, delta_y = math_utils.delta_axis(x, y, obs_x, obs_y)\n if not count:\n last_ro = math_utils.norm(delta_x, delta_y)\n if (math_utils.norm(delta_x, delta_y) <= last_ro):\n closer_obs = obstacle\n last_ro = math_utils.norm(delta_x, delta_y)\n count += 1\n \n return closer_obs\n\n\ndef Nh(phi: float) -> List[float]:\n return np.array([cos(phi), sin(phi)])\n\n\ndef phiAuf(obs_x: int, obs_y: int, r_x: int, r_y: int, r_o_dist:float, v_obs: list = abstract.v_obstacle(), v_rob: list = abstract.v_robot(), ko: float = ko) -> float: # Avoid Obstacles\n '''\n Returns an avoidance coefficient, relative to the obstacle, considering the obstacle's position and velocity, \n as well as the robot's position and velocity\n '''\n obstacle_position = np.array([obs_x, obs_y])\n\n s_vec = ko * (v_obs - v_rob)\n s_norm = math_utils.norm(s_vec[0], s_vec[1])\n obs_robot_dist = r_o_dist\n\n if obs_robot_dist >= s_norm:\n p_line_obs = obstacle_position + s_vec\n else:\n p_line_obs = obstacle_position + obs_robot_dist * s_vec / s_norm\n\n delta_x, delta_y = math_utils.delta_axis(p_line_obs[0], p_line_obs[1], r_x, r_y)\n phi_auf = phiR(delta_x, delta_y)\n \n return math_utils.wrapToPi(phi_auf)\n\n\ndef phiComposed(phi_tuf: float, phi_auf: float, R: float, obstacles: List[Tuple], delta: float = delta, d_min: float = d_min) -> float: # Composition\n '''\n Merges the avoidance and movement coefficients and returns a coefficient of movement, considering the obstacles and robot's position\n '''\n if obstacles is None:\n 
phi_composed = math_utils.wrapToPi(phi_tuf)\n else:\n gauss = math_utils.gaussian(R - d_min, delta)\n \n if R <= d_min:\n phi_composed = phi_auf\n else:\n # phi_composed = phi_auf * G(R - d_min, delta_const) + phi_tuf * (1 - G(R - d_min, delta_const))\n diff = math_utils.wrapToPi(phi_auf - phi_tuf)\n phi_composed = math_utils.wrapToPi(gauss * diff + phi_tuf) \n\n return math_utils.wrapToPi(phi_composed) \n\n\ndef phiH(rho: float, theta: float, cw: bool = False, radius: float = de, kr: float = kr) -> float: # Hyperbolic\n '''\n Returns a coefficient of a hyperbolic spiral that guides the robot to the ball\n '''\n '''\n The direction of rotation of the spiral has been inverted, cause by passing as in the article, \n the clockwise direction becomes counterclockwise and vice versa\n '''\n\n if rho > radius:\n angle = (pi / 2) * (2 - ((radius + kr) / (rho + kr)))\n elif 0 <= rho <= radius:\n angle = (pi / 2) * sqrt(rho / radius)\n\n if cw:\n return math_utils.wrapToPi(theta + angle)\n else:\n return math_utils.wrapToPi(theta - angle)\n\n\ndef phiR(d_x: float, d_y: float) -> float: # Repulsive\n '''\n Returns an avoidance coefficient, relative to the obstacle, considering nothing but the obstacle's \n position and the robot's position \n '''\n return atan2(d_y, d_x)\n\n\ndef phiTuf(theta: float, d_x: float, d_y: float, radius: float = de) -> float: # Move to Goal\n '''\n Merges a clockwise and a counterclockwise hyperbolic spiral and returns a coefficient of \n movement that guides the robot to the ball, following the smallest path \n '''\n y_l = d_y + radius\n y_r = d_y - radius\n\n ro_l = math_utils.norm(d_x, d_y - radius)\n ro_r = math_utils.norm(d_x, d_y + radius)\n\n phi_ccw = phiH(ro_l, theta, cw=True)\n phi_cw = phiH(ro_r, theta, cw=False)\n\n nh_ccw = Nh(phi_ccw)\n nh_cw = Nh(phi_cw)\n # The absolute value of y_l and y_r was not specified in the article, but the obtained results \n # with this trick are closer to the article images\n spiral_merge = (abs(y_l) * 
nh_ccw + abs(y_r) * nh_cw) / (2 * radius) \n\n if -radius <= d_y < radius:\n phi_tuf = atan2(spiral_merge[1], spiral_merge[0])\n elif d_y < -radius:\n phi_tuf = phiH(ro_l, theta, cw=False)\n else:\n phi_tuf = phiH(ro_r, theta, cw=True)\n\n return math_utils.wrapToPi(phi_tuf)\n\n\ndef generateUnivectorField(r_x: int, r_y: int, ball_pos: Tuple[int, int], obs_pos: List[Tuple[int, int]], de: float = de, v_obs: list = abstract.v_obstacle(), v_rob: list = abstract.v_robot(), ko: float = ko, delta: float = delta, d_min: float = d_min) -> float:\n\n ball_x, ball_y = ball_pos\n d_ball_x, d_ball_y = math_utils.delta_axis(ball_x, ball_y, r_x, r_y)\n theta = phiR(d_ball_x, d_ball_y)\n phi_tuf = phiTuf(theta, d_ball_x, d_ball_y, de)\n\n obstacle = closestObstacle(r_x, r_y, obs_pos)\n obs_x, obs_y = obstacle\n\n robot_obs_x, robot_obs_y = math_utils.delta_axis(obs_x, obs_y, r_x, r_y)\n R = math_utils.norm(robot_obs_x, robot_obs_y)\n robot_obs_dist = math_utils.norm(robot_obs_x, robot_obs_y)\n \n phi_auf = phiAuf(obs_x, obs_y, r_x, r_y, robot_obs_dist, v_obs, v_rob, ko)\n phi_composed = phiComposed(phi_tuf, phi_auf, R, obstacle, delta, d_min)\n\n return Nh(phi_composed)","sub_path":"univector.py","file_name":"univector.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"596047892","text":"from scipy import optimize\nimport numpy as np\nimport pylab as pl\n\npl.rc('axes', linewidth=2)\n\n# set up your read_array to use later to read in your file\ndef read_array(filename, dtype, separator='\\t'):\n \"\"\" Read a file with an arbitrary number of columns.\n The type of data in each column is arbitrary\n It will be cast to the given dtype at runtime\n \"\"\"\n cast = np.cast\n data = [[] for dummy in range(len(dtype))]\n for line in open(filename, 'r'):\n fields = line.strip().split(separator)\n for i, number in enumerate(fields):\n data[i].append(number)\n for i in range(len(dtype)):\n data[i] = cast[dtype[i]](data[i])\n return np.rec.array(data, dtype=dtype)\n\n# now read in your file -- the line below gives examples of datatypes\n#mydescr = np.dtype([('column1', 'int32'), ('column2Name', 'uint32'), ('col3', 'uint64'), ('c4', 'float32')])\nmydescr = np.dtype([('xpos', 'float32'), ('ypos', 'float32'),('yerr', 'float32')])\nmyrecarray = read_array('CalibAfterChvsEn.txt', mydescr)\n\n# put in a small error on the x measurement\nxerr = 0.001\n\n# here we plot the data with error bars\npl.errorbar(myrecarray.xpos,myrecarray.ypos,myrecarray.yerr,xerr)\n\n# now we want to do a least squares fit to the data -- a straight line\n# here is our function that we fit \ndef func(x, a, b):\n return a + b*x\n\n\n# Initial guess for a and b, the parameters of the fit\nx0 = np.array([1.0, 0.1])\nsigma = myrecarray.yerr\n\n\nprint (optimize.curve_fit(func, myrecarray.xpos, myrecarray.ypos, x0, sigma))\n\n# Change size and font of tick labels\n# Again, this doesn't work in interactive mode.\nfontsize = 14\nax = pl.gca()\nfor tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n tick.label1.set_fontweight('bold')\nfor tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n tick.label1.set_fontweight('bold')\n\npl.xlabel('energy', fontsize=16, fontweight='bold')\npl.ylabel('channel', fontsize=16, 
fontweight='bold')\n\n# save the plot to a file\npl.savefig('HEP.png', bbox_inches='tight')\n# display the plot so you can see it\npl.show()\n\n\n\n\n\n\n\n\n\n","sub_path":"Compton Effect/Lab Data/2/fitHEP_e1.py","file_name":"fitHEP_e1.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"187104419","text":"\"\"\"tracker URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom webapp.views import IndexView, \\\n ArticleView, ArticleDeleteView, \\\n ArticleCreateView, ArticleUpdateView, \\\n StatusListView, StatusView, StatusDeleteView, \\\n StatusCreateView, StatusUpdateView, TypeListView, \\\n TypeView, TypeCreateView, TypeUpdateView, TypeDeleteView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', IndexView.as_view(), name='index'),\n path('article/', ArticleView.as_view(), name='form_view'),\n path('article/add/', ArticleCreateView.as_view(), name='article_create_view'),\n path('article//edit/', ArticleUpdateView.as_view(), name='form_update'),\n path('article//delete/', ArticleDeleteView.as_view(url='http://localhost:8000/'), name='delete_form'),\n path('status/', StatusListView.as_view(), name='status_index'),\n path('status/', StatusView.as_view(), name='status_view'),\n path('status//delete/', StatusDeleteView.as_view(url='http://localhost:8000/status/#'),\n name='delete_status'),\n path('status/add/', StatusCreateView.as_view(), name='status_create_view'),\n path('status//edit/', StatusUpdateView.as_view(), name='status_update'),\n path('type/', TypeListView.as_view(), name='type_index'),\n path('type/', TypeView.as_view(), name='type_view'),\n path('type/add/', TypeCreateView.as_view(), 
name='type_create_view'),\n path('type//edit/', TypeUpdateView.as_view(), name='type_update'),\n path('type//delete/',\n TypeDeleteView.as_view(url='http://localhost:8000/type/#'),\n name='delete_type'),\n]\n","sub_path":"tracker_src/tracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"376627314","text":"Import('env', 'plugins', 'os', 'sys')\n \nlibGtest = env.StaticLibrary('gtest', [ \n 'gtest-death-test.cc',\n 'gtest-filepath.cc',\n 'gtest-port.cc',\n 'gtest-printers.cc',\n 'gtest-test-part.cc',\n 'gtest-typed-test.cc',\n 'gtest.cc',\n 'gtest_main.cc'\n])\n\ngtestEnv = env.Clone()\ngtestEnv.Append(LIBS=['gtest']) \ngtestEnv.Append(LIBPATH=[os.path.join(env['BUILDDIR'], 'gtest')])\nExport('gtestEnv')","sub_path":"src/gtest/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"397325268","text":"import json\nimport requests\nfrom datetime import datetime\nfrom logging import Handler, Formatter\n\nPATH_TO_TELEGRAM_SECRET = \"/home/analytics/.credentials/Telegram_Bot/credentials.json\"\ncredentials = json.loads(open(PATH_TO_TELEGRAM_SECRET).read())\n\n\nclass TelegramHandler(Handler):\n \"\"\"\n Fork of standart handler for working with telegram bots API\n \"\"\"\n\n def emit(self, record):\n log_entry = self.format(record)\n payload = {\n 'chat_id': credentials['TELEGRAM_CHAT_ID'],\n 'text': log_entry,\n 'parse_mode': 'HTML'\n }\n return requests.post(\"https://api.telegram.org/bot{token}/sendMessage\" \\\n .format(token=credentials['TELEGRAM_TOKEN']), data=payload).content\n\n\nclass TelegramFormatter(Formatter):\n \"\"\"\n Format for telegram messages\n \"\"\"\n\n def __init__(self):\n super(TelegramFormatter, self).__init__()\n\n def format(self, record):\n return \"[{levelname}|{file_name}]\" \\\n \"\\n{message}\" \\\n \"\\n\\n{datetime}\".format(message=record.msg,\n datetime=datetime.now().strftime('%b-%d-%Y %H:%M:%S'),\n levelname=record.__dict__['levelname'],\n file_name=record.__dict__['pathname']\n )\n","sub_path":"modules/handler/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"155182067","text":"from src.utils.tk import TKUtils\n\nfrom src.view.home.filtro import Filtro\nfrom src.view.home.actions import Actions\nfrom src.view.home.listagem import ListaDeElementos\nfrom src.view.home.cadastro import (FormularioApresentacao, FormularioTarefa,\n FormularioEvento)\n\n\nclass Home(TKUtils.obter_container()):\n\n def __init__(self):\n super().__init__()\n\n self.defs.pack['side'] = 'bottom'\n\n self.filtro = Filtro()\n self.actions = Actions()\n self.listagem = ListaDeElementos()\n self.cadastro_evento = FormularioEvento()\n self.cadastro_tarefa = FormularioTarefa()\n self.cadastro_apresentacao = FormularioApresentacao()\n\n def iniciar(self, master):\n super().iniciar(master=master)\n\n self.filtro.iniciar(master=self)\n self.actions.iniciar(master=self)\n self.listagem.iniciar(master=self)\n\n self.ocultar()\n","sub_path":"src/view/home/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"38029974","text":"fin = open('paint.in', 'r')\nfout = open('paint.out', 'w')\n\na, b = map(int, fin.readline().split())\nc, d = map(int, fin.readline().split())\n\nif c < a: # cases are symmetric\n\ta, c = c, a\n\tb, d = d, b\n\nanswer = (b-a) + (d-c)\n\nif b > c: # overlap\n\tanswer = max(b,d) - a\n\nfout.write(str(answer) + '\\n')\nfout.close()\n\n# Solution 2\n#for i in range(100):\n#\tif i >= a and i+1 <= b:\n#\t\tanswer += 1\n#\telif i >= c and i+1 <= d:\n#\t\tanswer += 1\n#fout.write(str(answer) + '\\n')\n#fout.close()\n","sub_path":"bronze/paint/paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"38244152","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Use text editor to edit the script and type in valid Instagram username/password\n\nfrom InstagramAPI import InstagramAPI\n\napi = InstagramAPI(\"\", \"\")\n\ndef sendDM(api,user_id,message):\n # api.searchUsername(username)\n # response = api.LastJson\n # user_id = response['user']['pk']\n mediaId = '1469246128228859784_1520786701' #i dont know what this is but was on sourse code\n recipients = [user_id]\n api.direct_message(message, user_id)\n\ndef getTotalFollowers(api, username):\n \"\"\"\n Returns the list of followers of the user.\n It should be equivalent of calling api.getTotalFollowers from InstagramAPI\n \"\"\"\n api.searchUsername(username)\n response = api.LastJson\n user_id = response['user']['pk']\n\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = api.getUserFollowers(user_id, maxid=next_max_id)\n followers.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return followers\n\ndef getTotalFollowing(api, username):\n \"\"\"\n Returns the list of followers of the user.\n It should be equivalent of calling api.getTotalFollowers from InstagramAPI\n \"\"\"\n api.searchUsername(username)\n response = api.LastJson\n user_id = response['user']['pk']\n\n following = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = api.getUserFollowings(user_id, maxid=next_max_id)\n following.extend(api.LastJson.get('users', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n return following\n\nif (api.login()):\n #usersList = [\"lewis_boughtflower\",\"sofiaxsharp_\",\"spencermensah\"]\n # usersList = [\"spencermensah\"]\n #\n # for names in usersList:\n # sendDM(api,names)\n\n followers = getTotalFollowing(api, \"pechee.__\")\n\n message = \"hey xx\"\n\n for follower in followers:\n 
userId = follower['pk']\n sendDM(api,userId,message)\n print(follower['username'])\n\n # usersList = [\"spencermensah\",\"lewis_boughtflower\"]\n #\n # for names in usersList:\n # sendDM(api,names,message)\n\nelse:\n print(\"Can't login!\")\n","sub_path":"forex.py","file_name":"forex.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"548147689","text":"#!/usr/bin/env python3\n\n\"\"\"\nPython script to downgrade a double precision Fortran routine to single\n(real4) precision without changing its external signature, for example\nto study the effect of reduced precision arithmetic within only one\nsubroutine that is part of a larger piece of software.\n\nThe script takes the following arguments:\n 1. The file name to be read. A will be created with _real4 appended\n to the name.\n 2. An optional argument specifying the name of the subroutine that is\n to be treated. If the argument is not given, all subroutines in\n the file will be modified.\n\nLimitations:\n - Currently only F77 files are supported. This could be easily fixed by\n using the fortran.two parser for F90 files and adjusting some of the\n node types, e.g. not only test for fparser.one.statements.Assignment\n but also for f2003.Assignment_Stmt\n - Currently only files with one subroutine and nothing else are\n supported. No modules, no classes, nothing. This could also easily\n fixed, by discovering subroutine nodes in a recursive AST search just\n like what is already happening to discover assignments\n - Whether a variable is first written or first read is determined\n lexicographically, not by building the flow graph. This means that\n branches or even goto statements can trick this analysis. It should\n still be enough for cases where variables are either read-only or\n write-only. Fixing this would be more difficult.\n - Read and write access to variables is only detected in assignments,\n for example in 'foo(i) = bar + z(g)' we would detect 'foo' as being\n written, and 'bar', 'z' as being read. 
No other access is detected,\n for example a subroutine or function call would not result in the\n arguments being added to the read and write lists.\n\"\"\"\n\nimport fparser.one.parsefortran\nimport fparser.common.readfortran\nimport sys, re\nimport textwrap\n\n# Read arguments\nfilename = sys.argv[1]\nverbose = False\nif(sys.argv[1] == '-verbose'):\n filename = sys.argv[2]\n verbose = True\ndef printv(arg):\n if verbose:\n print(arg)\nfilename_preduced = \"%s_preduced.f\"%(filename[0:-2])\nunitname = None\nif(len(sys.argv)>3):\n unitname = sys.argv[2]\n if(sys.argv[1] == '-verbose'):\n unitname = sys.argv[3]\nif(unitname == None):\n printv(\"preducer downgrading the precision of all subroutines in file %s.\"%(filename))\nelse:\n printv(\"preducer downgrading the precision of subroutine \\\"%s\\\" in file %s.\"%(unitname,filename))\n\ndef cleanVariableName(var):\n \"\"\"\n A reference to a variable can be something messy like \"BaR(3,foo),\", where\n all we want is the variable name \"bar\". This function removes array\n indices, trailing commas etc, and makes everything lowercase, to get only\n a clean variable name and nothing else.\n \"\"\"\n return re.split('[,(]',var)[0].lower()\n\ndef find_vars(varstring):\n current_varname = ''\n varlist = list()\n parentheses_depth = 0\n for i, c in enumerate(varstring):\n if c == '(':\n parentheses_depth += 1\n elif c == ')':\n parentheses_depth -= 1\n elif parentheses_depth == 0:\n if c != ' ' and c != '\\t':\n if(c == ','):\n if(len(current_varname.strip())>0):\n varlist.append(current_varname)\n current_varname = ''\n else:\n current_varname += c\n varlist.append(current_varname)\n if(varlist[0].lower()=='doubleprecision'):\n del(varlist[0])\n else:\n varlist[0] = varlist[0][15:]\n return varlist\n\ndef visitDoublePrecisionStmt(node):\n \"\"\"\n The f77 parser treats a line containing a double precision variable\n declaration as a Line, which is a string of characters. 
We need to extract\n the variable names from that string, and not get confused by arrays. For\n example, \"double precision foo(a,3), bar\" should give us the variables\n \"foo\" and \"bar\", and nothing else.\n \"\"\"\n if(type(node)!=fparser.one.typedecl_statements.DoublePrecision):\n raise Exception(\"visitDoublePrecisionStmt called on wrong node type\")\n slist = find_vars(node.item.line)\n varset = set()\n for s in slist:\n varname = cleanVariableName(s)\n varset.add(varname) # add this variable name to set\n return varset\n\ndef visitNode(node,doublevars,doublevars_modified):\n \"\"\"\n Recursively go through the AST and find all assignments.\n This is needed to find variables that are read before modified, and\n variables that are modified at all.\n \"\"\"\n children = []\n doublevars_predefined = set()\n if hasattr(node, \"content\"):\n children = node.content\n elif hasattr(node, \"items\"):\n children = node.items\n elif type(node) in (tuple, list):\n children = node\n for child in children:\n if(type(child)==fparser.one.statements.Assignment):\n lhs = cleanVariableName(child.variable)\n # Visit an assignment statement, e.g. 
\"a = b + c\"\n if(lhs in doublevars):\n doublevars_modified.add(lhs)\n rhs = child.expr\n readDoubleVars = set(filter(lambda x: x in rhs, doublevars))\n doublevars_predefined = doublevars_predefined.union(readDoubleVars.difference(doublevars_modified))\n else:\n newmodified, newpredefined = visitNode(child, doublevars, doublevars_modified)\n doublevars_modified = doublevars_modified.union(newmodified)\n doublevars_predefined = doublevars_predefined.union(newpredefined)\n return doublevars_modified, doublevars_predefined\n\ndef f77linebreaks(instr):\n \"\"\"\n Takes a string as an input, and breaks all lines after at most 72\n characters, using F77 line continuation markers.\n \"\"\"\n outstr = ''\n for l in instr.splitlines():\n if(len(l.strip())==0): # empty line\n outstr += l+'\\n'\n elif(l[0]!=' ' or l.lstrip()[0]=='!'): # comment line, never touch those\n outstr += l+'\\n'\n else:\n if(len(l) > 7 and l[0:7].strip().isnumeric()): # workaround for parser bug: numeric line labels are printed with an incorrect blank space in column 1. 
Remove this.\n l = l[0:7].strip().ljust(7) + l[7:]\n while(len(l) > 72):\n outstr += l[0:71]+'\\n'\n l = ' *'+l[71:]\n outstr += l+'\\n'\n return outstr\n\ndef real4subroutine(unit, file, allunits):\n # Analysis part: Find the subroutine that needs to be modified,\n # and for that subroutine, find the double precision arguments\n # and for each of those, find out whether they are in/outputs.\n args = unit.args.copy()\n if(unit.blocktype == 'function'):\n args.append(unit.name)\n printv(args)\n doublevars = set() # all double precision variables declared within subroutine\n doublevars_predefined = set() # all double precision variables read before being modified\n doublevars_modified = set() # all double precision variables modified within subroutine\n decls = list()\n for c in unit.content:\n decltypes = [fparser.one.typedecl_statements.Byte,\n fparser.one.typedecl_statements.Character,\n fparser.one.typedecl_statements.Complex,\n fparser.one.typedecl_statements.DoubleComplex,\n fparser.one.typedecl_statements.DoublePrecision,\n fparser.one.typedecl_statements.Integer,\n fparser.one.typedecl_statements.Logical,\n fparser.one.typedecl_statements.Real,\n fparser.one.statements.Parameter]\n if(type(c) in decltypes):\n decls.append(c)\n if(type(c) == fparser.one.typedecl_statements.DoublePrecision):\n doublevars = doublevars.union(visitDoublePrecisionStmt(c))\n else:\n newmodified, newpredefined = visitNode(c, doublevars, doublevars_modified)\n doublevars_modified = doublevars_modified.union(newmodified)\n doublevars_predefined = doublevars_predefined.union(newpredefined)\n doubleargs_modified = doublevars_modified.intersection(args)\n doubleargs_predefined = doublevars_predefined.intersection(args)\n printv(\"local double precision variables: %s\"%doublevars.difference(args).__str__())\n printv(\"double precision arguments: %s\"%doublevars.intersection(args).__str__())\n printv(\" - modified: %s\"%(doubleargs_modified.__str__()))\n printv(\" - input: 
%s\"%(doubleargs_predefined.__str__()))\n printv(\" - unused: %s\"%(doublevars.intersection(args).difference(doubleargs_predefined.union(doubleargs_modified)).__str__()))\n\n # Cloning part: Create a subroutine that has the same body as the original\n # one, but uses the new precision throughout and append _sp to its name\n fclone = unit.tofortran()\n fclone = fclone.replace('DOUBLEPRECISION','REAL')\n if(unit.blocktype == 'function'):\n fclone = re.sub('FUNCTION %s'%unit.name,'FUNCTION %s_sp'%unit.name, fclone, flags=re.IGNORECASE)\n else:\n fclone = re.sub('SUBROUTINE %s'%unit.name,'SUBROUTINE %s_sp'%unit.name, fclone, flags=re.IGNORECASE)\n for otherunit in allunits:\n fclone = re.sub('CALL %s\\('%otherunit.name, 'CALL %s_sp('%otherunit.name, fclone, flags=re.IGNORECASE)\n fclone = re.sub('1.0d308', '1.0e38', fclone, flags=re.IGNORECASE)\n fclone = f77linebreaks(fclone)\n file.write(fclone)\n file.write('\\n\\n')\n\n # Wrapper part: Create a subroutine that has the signature of the original\n # one, and performs the down-cast/call/up-cast to the reduced precision\n # subroutine.\n args_str = \", \".join(unit.args)\n args_sp = args_str\n for dv in doublevars:\n args_sp = re.sub(r\"\\b%s\\b\" % dv , '%s_sp'%dv, args_sp)\n decls_sp = list()\n for d in decls:\n if(type(d) == fparser.one.typedecl_statements.DoublePrecision):\n varnames = visitDoublePrecisionStmt(d)\n d_sp = d.item.line.replace('DOUBLE PRECISION','REAL').lower()\n for vn in varnames:\n d_sp = re.sub(r\"\\b%s\\b\" % vn , '%s_sp'%vn, d_sp)\n decls_sp.append(d_sp)\n decls_sp.append(d.item.line)\n decls_sp = \"\\n\".join(decls_sp)\n copyin = set()\n for dm in doubleargs_predefined:\n copyin.add(\"%s_sp = %s\"%(dm,dm))\n copyin = \"\\n\".join(copyin)\n copyout = set()\n for dm in doubleargs_modified:\n copyout.add(\"%s = %s_sp\"%(dm,dm))\n copyout = \"\\n\".join(copyout)\n if(unit.blocktype == 'function'):\n wrapper = \"double precision function %s(%s)\\n%s\\n%s\\n%s = %s_sp(%s)\\n%s\\nreturn\\nend 
function\"%(unit.name,args_str,decls_sp,copyin,unit.name,unit.name,args_sp,copyout)\n else:\n wrapper = \"subroutine %s(%s)\\n%s\\n%s\\ncall %s_sp(%s)\\n%s\\nend subroutine\"%(unit.name,args_str,decls_sp,copyin,unit.name,args_sp,copyout)\n wrapper = f77linebreaks(textwrap.indent(wrapper,7*' '))\n file.write(wrapper)\n\n# Parse Fortran file\nreader = fparser.common.readfortran.FortranFileReader(filename)\nfp = fparser.one.parsefortran.FortranParser(reader)\nfp.parse()\n\nif(len(fp.block.content) == 0):\n print(\"Warning: Preducer called on empty file %s\"%(filename))\n from shutil import copyfile\n copyfile(filename, filename_preduced)\n exit()\nwith open(filename_preduced,'w') as file:\n for unit in fp.block.content:\n if(unit.blocktype != 'subroutine' and unit.blocktype != 'function'):\n raise Exception(\"Top Unit is neither subroutine nor function\")\n if(unitname == None or unit.name == unitname):\n real4subroutine(unit, file, fp.block.content)\n","sub_path":"preducer.py","file_name":"preducer.py","file_ext":"py","file_size_in_byte":11646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"128197635","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torchvision import models\n#from matplotlib import pyplot as plt\nfrom torchvision import transforms\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset, TensorDataset\nBatchSize = 1\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndef recreate_image(im_as_var):\n \"\"\"\n Recreates images from a torch variable, sort of reverse preprocessing\n\n Args:\n im_as_var (torch variable): Image to recreate\n\n returns:\n recreated_im (numpy arr): Recreated image in array\n \"\"\"\n reverse_mean = [-0.485, -0.456, -0.406]\n reverse_std = [1/0.229, 1/0.224, 1/0.225]\n recreated_im = torch.clone(im_as_var)\n recreated_im = recreated_im.cpu().numpy()[0]\n for c in range(3):\n recreated_im[c] /= reverse_std[c]\n recreated_im[c] -= reverse_mean[c]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n recreated_im = np.round(recreated_im * 255)\n\n recreated_im = np.uint8(recreated_im).transpose(1, 2, 0)\n return recreated_im\n\n\"\"\"\ndef load_imagenet(PATH = \"./data/\"):\n transform = transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n\n dataset = torchvision.datasets.ImageNet(root=PATH, split='val',\n transform=transform)\n return dataset\n\"\"\"\n\ndef predict_torch(model, img):\n \"\"\"\n with torch.no_grad():\n torch_img = torch.from_numpy(img)\n torch_output = model(torch_img)\n \"\"\"\n torch_output = model(img)\n return torch_output\n\n\ndef thundernna_attack(img, target, model, epsilon):\n model.eval()\n img.requires_grad = True\n output = predict_torch(model, img)\n loss = F.nll_loss(output, target)\n model.zero_grad()\n loss.backward()\n data_grad = img.grad.data\n tub = torch.clamp(torch.ones_like(img) / data_grad, -epsilon, 
epsilon)\n tub = torch.nan_to_num(tub)\n return img + tub\n\n\"\"\"\npretrained_model = models.resnet18(pretrained=True, progress = True).to(device)\npretrained_model.eval()\n\ndataset = load_imagenet()\n\ntest_loader = torch.utils.data.DataLoader(dataset, batch_size=BatchSize, shuffle=False)\nepsilon = 0.3\ncorrect = 0\nfor idx, (img, target) in tqdm(enumerate(test_loader)):\n perturbed_img =thundernna_attack(img, target, pretrained_model, epsilon)\n out = predict_torch(pretrained_model, perturbed_img)\n final_pred = out.data.max(1, keepdim=True)[1]\n correct += final_pred.eq(target.data.view_as(final_pred)).sum()\nfinal_acc = correct/float(len(test_loader.dataset))\nprint(\"Epsilon: {}\\tTest Accuracy = {} / {} = {}\".format(epsilon, correct, len(test_loader.dataset), final_acc))\n\"\"\"\n\n","sub_path":"thundernna.py","file_name":"thundernna.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"561749318","text":"import numpy as np\nimport matplotlib\n\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nfrom featureNormalize import featureNormalize\nfrom computeCostMulti import computeCostMulti\nfrom gradientDescentMulti import gradientDescentMulti\nfrom normalEqn import normalEqn\n\ndef main():\n # ======================= Part 1: Feature Normalization ===============\n print('Loading data ...')\n # Load Data\n data = np.loadtxt('ex1data2.txt', delimiter=',')\n X = data[:, 0:2]\n y = data[:, 2:]\n m = len(y)\n print('First 10 examples from the dataset:')\n for i in range(10):\n print('x =', X[i], ', y =', y[i])\n\n # Scale features and set them to zero mean\n print('Normalizing Features ...')\n X, mu, sigma = featureNormalize(X)\n\n # Add intercept term to X\n X = np.hstack((np.ones((m, 1)), X)) # Add a column of ones to x\n\n # ======================= Part 2: Gradient Descent ========================\n print('Running gradient descent ...')\n # Choose some alpha value\n alpha = 0.01\n num_iters = 400\n\n # Init Theta and Run Gradient Descent\n theta = np.zeros((3, 1))\n theta, J_history = gradientDescentMulti(X, y, theta, alpha, num_iters)\n\n # Plot the convergence graph\n plt.plot(np.arange(1, len(J_history) + 1), J_history, label='Linear regression')\n plt.xlabel('Number of iterations')\n plt.ylabel('Cost J')\n plt.ion()\n plt.show()\n input('Program paused. 
Press enter to continue.')\n\n # Display gradient descent's result\n print('Theta computed from gradient descent:', theta, sep='\\n')\n\n # Estimate the price of a 1650 sq-ft, 3 br house\n x = np.append(np.array([1]), np.array([1650, 3] - mu) / sigma)\n price = np.dot(x, theta)\n print('Predicted price of a 1650 sq-ft, 3 br house (using gradient descent): $', price, sep='')\n\n # ======================= Part 3: Normal Equations ========================\n print('Solving with normal equations...')\n # Load Data\n data = np.loadtxt('ex1data2.txt', delimiter=',')\n X = data[:, 0:2]\n y = data[:, 2:]\n m = len(y)\n\n # Add intercept term to X\n X = np.hstack((np.ones((m, 1)), X)) # Add a column of ones to x\n\n # Calculate the parameters from the normal equation\n theta = normalEqn(X, y)\n\n # Display normal equation's result\n print('Theta computed from the normal equations:', theta, sep='\\n')\n\n # Estimate the price of a 1650 sq-ft, 3 br house\n price = np.dot(np.array([1, 1650, 3]), theta)\n print('Predicted price of a 1650 sq-ft, 3 br house (using normal equations): $', price, sep='')\n\nif __name__ == '__main__':\n main()\n","sub_path":"ml/ex1/ex1_multi.py","file_name":"ex1_multi.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"3121394","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 204. Count Primes\n# Count the number of prime numbers less than a non-negative number, n.\n\n\n# 总结:\n# https://discuss.leetcode.com/topic/14036/fast-python-solution\n\n\nclass Solution(object):\n\n def countPrimes(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n <= 2:\n return 0\n\n prime = [True] * n\n prime[:2] = [False, False]\n for base in xrange(2, int((n - 1) ** 0.5) + 1):\n if prime[base]:\n prime[base ** 2::base] = [False] * len(prime[base ** 2::base])\n return sum(prime)\n\nimport unittest\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_demo(self):\n self.assertEqual(1, 1)\n self.assertTrue(True)\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestSolution)\nunittest.TextTestRunner(verbosity=2).run(suite)\n\nif __name__ == '__main__':\n print('ok')\n","sub_path":"py/p204.py","file_name":"p204.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"409983206","text":"from multiprocessing import Pool\nimport os, time, random\n\n\ndef worker(msg):\n t_start = time.time()\n print(\"%s 号事件开始执行,进程号为%d\" % (msg, os.getpid()))\n time.sleep(random.random()*2)\n t_stop = time.time()\n print(msg, \"号事件执行完毕,耗时%0.2f秒\" % (t_stop-t_start))\n\n\ndef main():\n pool = Pool(2) # 定义一个进程池,最大容量为3\n for i in range(5):\n # Pool().apply_async(要调用的目标,(传递给目标的参数))\n # 每次循环会用空闲的子进程调用目标\n pool.apply_async(worker, (i,)) # 往进程池添加任务\n print(\"-----start----\")\n pool.close() # 关闭进程池后,pool不再接收新的请求\n # 等待pool中的所有子进程执行完,必须放在close后\n # 若是没有join,则主进程提前结束,所有子进程消亡\n pool.join() # 主进程不会等,则需要手动阻塞\n print(\"-----end-----\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"MultipleTask/process_pool.py","file_name":"process_pool.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"508064754","text":"\ndef add_username_segment():\n if powerline.args.shell == 'bash':\n user_prompt = ' \\\\u '\n elif powerline.args.shell == 'zsh':\n user_prompt = ' %n '\n else:\n import os\n user_prompt = ' %s ' % os.getenv('USER')\n\n powerline.append(user_prompt, Color.USERNAME_FG, Color.USERNAME_BG)\n\nadd_username_segment()\n","sub_path":"segments/username.py","file_name":"username.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"471021347","text":"from epi import epimanager as EpiManager\nimport json\nimport sys\n\n#N4D es python2 y LlX Remote Installer es Python3, no puedo trabajar con librerias de EPI. Utilizo este script a modo de puente.\ntry:\n\tepi_operation=sys.argv[1]\n\n\tepi=EpiManager.EpiManager()\n\tepi_to_exec=\"epi.\"+epi_operation\n\n\tepi_solved={}\n\texec(\"epi_solved['val']=%s\"%epi_to_exec)\n\n\tdata=json.dumps(epi_solved['val'])\n\tprint (data)\n\nexcept Exception as e:\n\tprint('False')\n","sub_path":"lliurex-remote-installer-gui.install/usr/share/lliurex-remote-installer/helper_epi.py","file_name":"helper_epi.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"50596179","text":"#!/usr/bin/python2.7\n# Copyright 2012 JatLeGo Inc. All Rights Reserved.\n# Author: andyzh1314@gmail.com (Andy Zhau)\n\n\nfrom solurank.configs import config\nfrom solurank.db.db import db\nfrom solurank.models import model\n\n\nOWNER = 1\nCOMMENT = 2\nFAVORITE = 4\n\n\nclass UserParticipateModel(model.Model):\n\n\n ModelPath = \"user.participate\"\n\n\n @classmethod\n def UpdateUserParticipate(cls, uid=None, pid=None, aid=None, view_time=None,\n update_time=None, reason=None, prevent_uid=None,\n prevent_spread=False, problem_scope=False):\n query = { \"pid\": pid and int(pid) }\n if uid is not None: query[\"uid\"] = int(uid)\n if not problem_scope: query[\"aid\"] = aid and int(aid)\n\n processed = False\n for doc in db[cls.ModelPath].find(query):\n processed = True\n if doc[\"uid\"] == prevent_uid:\n continue\n if view_time is not None:\n doc[\"view_time\"] = view_time\n if update_time is not None:\n doc[\"update_time\"] = update_time\n doc[\"news\"] = (doc[\"view_time\"] and doc[\"update_time\"] and\n doc[\"view_time\"] < doc[\"update_time\"])\n if reason is not None:\n doc[\"reason\"] |= reason\n db[cls.ModelPath].save(doc)\n if not processed:\n doc = query\n doc[\"view_time\"] = view_time or update_time\n doc[\"update_time\"] = update_time\n doc[\"reason\"] = reason\n db[cls.ModelPath].save(doc)\n elif uid is not None and update_time is not None and not prevent_spread:\n cls.UpdateUserParticipate(pid=pid, aid=aid, update_time=update_time,\n prevent_uid=prevent_uid)\n\n\n @classmethod\n def RemoveUserParticipate(cls, uid=None, pid=None, aid=None,\n problem_scope=False, reason=-1):\n query = { \"pid\": pid and int(pid) }\n if uid is not None: query[\"uid\"] = int(uid)\n if not problem_scope: query[\"aid\"] = aid and int(aid)\n\n for doc in db[cls.ModelPath].find(query):\n doc[\"reason\"] &= ~reason\n if not doc[\"reason\"]:\n db[cls.ModelPath].remove({\"_id\": doc[\"_id\"]})\n else:\n db[cls.ModelPath].save(doc)\n\n\n @classmethod\n 
def GetUserParticipates(cls, uid, news_only=True, page=1):\n from solurank.models import answer\n from solurank.models import problem\n query = {\"uid\": int(uid)}\n if news_only:\n query[\"news\"] = True\n sort = [(\"update_time\", -1), (\"view_time\", -1)]\n else:\n sort = [(\"_id\", -1)]\n messages = []\n for doc in db[cls.ModelPath].find(\n query, sort=sort, skip=(page - 1) * config.SinglePageMessageNumber,\n limit=config.SinglePageMessageNumber):\n if \"pid\" in doc:\n messages.append(problem.ProblemModel.GetProblem(doc[\"pid\"]))\n else:\n messages.append(answer.AnswerModel.GetAnswer(doc[\"aid\"]))\n return messages\n\n\n @classmethod\n def GetUserParticipateCount(cls, uid, news_only=True):\n query = {\"uid\": int(uid)}\n if news_only:\n query[\"news\"] = True\n return cls.Count(query)\n","sub_path":"solurank/models/user_participate.py","file_name":"user_participate.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"377587653","text":"class Solution:\n def highestRankedKItems(self, grid: List[List[int]], pricing: List[int], start: List[int], k: int) -> List[List[int]]:\n n = len(grid)\n m = len(grid[0])\n arr = []\n b = [[-1] * m for i in range(n)]\n b[start[0]][start[1]] = 0\n q = Deque([(0, start)])\n d = ((0,1), (1,0), (0,-1), (-1,0))\n while q:\n h, (x, y) = q.popleft()\n for dx, dy in d:\n xx = x + dx\n yy = y + dy\n if 0<=xx 0:\n storeProduct = Product.objects.get(id=int(form.cleaned_data['product_id']))\n needAmount = form.cleaned_data['amount']\n\n if storeProduct.stock >= needAmount:\n orderProduct = ProductOrder.objects.create(\n product=storeProduct,\n amount=needAmount\n )\n storeProduct.change_stock(needAmount)\n storeProduct.save()\n orderProduct.save()\n productsArr.append(orderProduct)\n else:\n return redirect(reverse('new_order') + '?error=1&name=' + storeProduct.name + '&amount=' + str(storeProduct.stock))\n\n order = Order.objects.create(\n user=user\n )\n\n price = 0\n for p in productsArr:\n price += (p.amount * p.product.price_brutto)\n order.products.add(p)\n\n order.full_brutto_price = price\n order.save()\n return redirect(reverse('new_order') + '#list')\n\n products = Product.objects.all()\n data_formset = []\n\n orders = Order.objects.filter(user=user)\n\n notRealizedOrders = False\n if user.is_staff:\n notRealizedOrders = Order.objects.filter(realized=False)\n\n for product in products:\n data_formset.append({'product_name': product.name, 'product_id': product.id, 'amount': 0, 'price_per_item': product.price_brutto})\n\n formset = OrderFormSet(initial=data_formset)\n\n errorName = request.GET.get('name', False)\n errorAmount = request.GET.get('amount', 0)\n\n return render(request, 'admin_panel/client/client-form.html', {\n 'user': user,\n 'products': products,\n 'formset': formset,\n 'orders': orders,\n 'notRealizedOrders': notRealizedOrders,\n 'errorName': errorName,\n 'errorAmount': errorAmount\n 
})\n\n\n@login_required(login_url='/admin/login/?next=/admin-panel/')\ndef accept_order(request, num=\"0\"):\n\n order = Order.objects.get(id=num)\n\n for productOrder in order.products.all():\n productOrder.product.freeze_stock -= productOrder.amount\n productOrder.product.save()\n\n order.realized = True\n order.save()\n\n return redirect(reverse('new_order') + '#list-no-realize')\n","sub_path":"warehouse/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"113629926","text":"#!/usr/bin/env python3\n#\n# Copyright 2019 ROBOTIS CO., LTD.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors: Darby Lim, Pyo\n\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.actions import LogInfo\nfrom launch.substitutions import LaunchConfiguration\nfrom launch_ros.actions import Node\n\n\ndef generate_launch_description():\n # [RECOMMENDED] If you want handle arguments out of this launch file,\n # you have to set LaunchConfiguration\n # If you set arguments below, you can't access topic_name CLI or other launch files\n # qos_profile = 0\n\n # 0: Default QoSProfile(depth=10)\n qos_profile = LaunchConfiguration('qos_profile', default=0)\n namespace = LaunchConfiguration('ns', default='example')\n\n return LaunchDescription([\n LogInfo(msg=['Execute two ''publisher''s has different node name!!']),\n\n # [RECOMMENDED] This func allows you to expose the arguments\n DeclareLaunchArgument(\n 'topic_name',\n default_value='count',\n description='Specifying topic name to publisher'),\n\n DeclareLaunchArgument(\n 'qos_profile',\n default_value=qos_profile,\n description='Specifying qos_profile to publisher. 
Default QoSProfile(depth=10)'),\n\n DeclareLaunchArgument(\n 'namespace',\n default_value='ns',\n description='Specifying namespace to node'),\n\n Node(\n node_namespace=namespace,\n package='examples_rclcpp',\n node_executable='publisher',\n node_name='first_pub',\n parameters=[{'message': 'First Pub'}],\n arguments=['-q', qos_profile],\n output='screen'),\n\n Node(\n node_namespace=namespace,\n package='examples_rclcpp',\n node_executable='publisher',\n node_name='second_pub',\n parameters=[{'message': 'Second Pub'}],\n arguments=['-q', qos_profile],\n output='screen'),\n ])\n","sub_path":"examples_rclcpp/launch/multiple_node.launch.py","file_name":"multiple_node.launch.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"557416970","text":"import time\nimport datetime\nimport requests\nimport csv\nimport platform\nimport os\nfrom os.path import expanduser\nimport traceback\nimport json\nimport pytz\nimport zlib\nimport numpy as np\nimport tushare as ts\nfrom pytdx.hq import TdxHq_API\nfrom pytdx.params import TDXParams\n\napi = TdxHq_API()\n\nbaseUrl = \"http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id=*******&TYPE=k&js=(x)&rtntype=5&isCR=false&authorityType=fa&fsData1513514933723=fsData1513514933723\" #\"http://hq2fls.eastmoney.com/EM_Quote2010PictureApplication/Flash.aspx?Type=CHD&ID=*******&lastnum=300&r=0.6714464421384037\"\n\n\ndef cal_vol(code):\n #int_date = int(dates)\n result = []\n datas = api.get_transaction_data(TDXParams.MARKET_SH if code.startswith('6') else TDXParams.MARKET_SZ,\n code, 0, 2000)\n\n while len(datas) > 0:\n result += datas\n datas = api.get_transaction_data(TDXParams.MARKET_SH if code.startswith('6') else TDXParams.MARKET_SZ,\n code, len(result), 2000)\n\n if len(result) > 0:\n with open('test.txt', mode='a', encoding='utf-8') as f:\n for it in result:\n f.write(\"{0},{1},{2},{3}\\n\".format(code, it['time'], it['price'], it['vol']))\n\n\ndef cal(code):\n print(code)\n cal_vol(code)\n\ndef run():\n while True:\n try:\n print(\"try\")\n codes = ts.get_stock_basics().index.values\n for code in codes:\n try:\n cal(code)\n except Exception as error:\n traceback.print_exc()\n break\n except Exception as error:\n traceback.print_exc()\n\n time.sleep(10)\n\n\ndef main():\n with api.connect('119.147.212.81', 7709):\n run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"xueqiu/jihejinjia.py","file_name":"jihejinjia.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"616419965","text":"from random import randrange\n\nfrom FindMostProbableKmerUsingProfileMatrix import find_most_probable_kmer_using_profile_matrix\nfrom MotifMatrixCount import motif_matrix_count\nfrom MotifMatrixProfile import motif_matrix_profile\nfrom ScoreMotif import score_motif\n\ndnas = ['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG']\nk = 3\n\nmotifs = []\nfor dna in dnas:\n start = randrange(len(dna) - k)\n motif = dna[start:start+k]\n motifs.append(motif)\n\nbest_motifs = motifs\n\nwhile True:\n counts_matrix = motif_matrix_count(motifs)\n for elem, counts in counts_matrix.items(): # add in pseudocounts\n counts_matrix[elem] = [c + 1 for c in counts]\n profile_matrix = motif_matrix_profile(counts_matrix)\n\n motifs = [find_most_probable_kmer_using_profile_matrix(profile_matrix, dna)[0] for dna in dnas]\n if score_motif(motifs) < score_motif(best_motifs):\n best_motifs = motifs\n else:\n break\n\n[print(f'{m}') for m in best_motifs]","sub_path":"docs/data/learn/Bioinformatics/output/ch2_code/src/Stepik.2.7.ExerciseBreak1.py","file_name":"Stepik.2.7.ExerciseBreak1.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"477908227","text":"# -*- coding:utf-8 -*-\n# Copyright (c) 2015, Galaxy Authors. All Rights Reserved\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n#\n# Author: wangtaize@baidu.com\nimport os\nimport time\nimport logging\nfrom SOAPpy import WSDL\nfrom SOAPpy import headerType\nfrom django import shortcuts\nfrom bootstrap import settings\nfrom common import http\nfrom django.contrib import auth\nfrom django.contrib.auth import models\nimport xml.etree.ElementTree as ET\nLOG = logging.getLogger(\"console\")\n\ndef auto_login_required(func):\n def auto_login_wrapper(request, *args, **kwds):\n if request.user.is_authenticated():\n return func(request, *args, **kwds)\n ticket = request.GET.get('ticket',None)\n cas_url = \"%s?service=%s\"%(settings.UUAP_CAS_SERVER, settings.MY_HOST)\n if not ticket:\n LOG.info(\"redirect to %s\"%cas_url)\n return shortcuts.redirect(cas_url)\n else:\n user = auth.authenticate(ticket = ticket, service = settings.MY_HOST)\n if not user:\n return shortcuts.redirect(cas_url)\n else:\n auth.login(request, user)\n return func(request, *args, **kwds)\n return auto_login_wrapper\n\n\n\ndef auth_ticket(ticket, my_url):\n \"\"\"\n \n \n wangtaize \n \n \n \"\"\"\n client = http.HttpClient()\n auth_url = settings.UUAP_VALIDATE_URL\n response = client.do_post(auth_url,\n [('service',my_url),('ticket',ticket)],\n content_to_file = False)\n if response['error'] is None:\n root = ET.fromstring(response['content'])\n success = root.find('{http://www.yale.edu/tp/cas}authenticationSuccess')\n if success is not None:\n user = success.find('{http://www.yale.edu/tp/cas}user')\n if user is not None:\n return user.text\n return None\n\n\nclass UUAPBackend(object):\n def __init__(self):\n server = WSDL.Proxy(settings.UIC_SERVICE)\n hd = headerType(data={\"appKey\":settings.UIC_KEY})\n server.soapproxy.header = hd\n self.server = server \n def authenticate(self, ticket=None, service=None):\n 
username = auth_ticket(ticket, service)\n if not username:\n return None\n i_user = list(models.User.objects.filter(username=username))\n if not i_user:\n uic_user = server.getUserByUsername(arg0 = username)\n if hasattr(uic_user,'username'):\n new_user = models.User(username = uic_user.username,\n email = uic_user.email)\n new_user.save()\n return new_user\n return None\n return i_user[0]\n def get_user(self, user_id):\n try:\n return models.User.objects.get(pk=user_id)\n except models.User.DoesNotExist:\n return None\n","sub_path":"console/backend/src/common/cas.py","file_name":"cas.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"277273109","text":"from django.test import TestCase\n\nfrom database.models import LaundryShop, Rating\n\n# Create your tests here.\nclass ModelTestCase(TestCase):\n def test_simple_location_property(self):\n shop = LaundryShop.objects.create(name='ls1', province='province1',\n barangay='barangay1', contact_number='12345',\n hours_open='24 hours', days_open='mon - sat')\n expected = 'barangay1, province1'\n self.assertEquals(shop.location, expected)\n\n def test_complete_location_property(self):\n shop = LaundryShop.objects.create(name='ls1', province='province1',\n city='city1', barangay='barangay1', street='street1',\n building='building1', contact_number='12345612',\n hours_open='12hours', days_open='never')\n expected = 'building1, street1, barangay1, city1, province1'\n self.assertEquals(shop.location, expected)\n\n def test_average_rating(self):\n shop = LaundryShop.objects.create(name='ls1', province='province1',\n barangay='barangay1', contact_number='12345',\n hours_open='24 hours', days_open='mon - sat')\n Rating.objects.create(laundry_shop=shop, paws=4)\n Rating.objects.create(laundry_shop=shop, paws=5)\n expected = (4 + 5) / 2.0\n self.assertEquals(shop.average_rating, expected)\n\n def test_average_rating_no_rating(self):\n shop = LaundryShop.objects.create(name='ls1', province='province1',\n barangay='barangay1', contact_number='12345',\n hours_open='24 hours', days_open='mon - sat')\n expected = 0\n self.assertEquals(shop.average_rating, expected)\n","sub_path":"LaundryBear/database/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"521703288","text":"'''\n# Constructs Programming Model\n\n> Software-defined state\n\n\n[](https://badge.fury.io/js/constructs)\n[](https://badge.fury.io/py/constructs)\n[](https://badge.fury.io/nu/Constructs)\n[](https://maven-badges.herokuapp.com/maven-central/software.constructs/constructs)\n\n## What are constructs?\n\nConstructs are classes which define a \"piece of system state\". Constructs can be composed together to form higher-level building blocks which represent more complex state.\n\nConstructs are often used to represent the *desired state* of cloud applications. For example, in the AWS CDK, which is used to define the desired state for AWS infrastructure using CloudFormation, the lowest-level construct represents a *resource definition* in a CloudFormation template. These resources are composed to represent higher-level logical units of a cloud application, etc.\n\n## Contributing\n\nThis project has adopted the [Amazon Open Source Code of\nConduct](https://aws.github.io/code-of-conduct).\n\nWe welcome community contributions and pull requests. 
See our [contribution\nguide](./CONTRIBUTING.md) for more information on how to report issues, set up a\ndevelopment environment and submit code.\n\n## License\n\nThis project is distributed under the [Apache License, Version 2.0](./LICENSE).\n'''\nimport abc\nimport builtins\nimport datetime\nimport enum\nimport typing\n\nimport jsii\nimport publication\nimport typing_extensions\n\nfrom ._jsii import *\n\n\nclass ConstructMetadata(\n metaclass=jsii.JSIIMeta,\n jsii_type=\"constructs.ConstructMetadata\",\n):\n '''Metadata keys used by constructs.'''\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"DISABLE_STACK_TRACE_IN_METADATA\")\n def DISABLE_STACK_TRACE_IN_METADATA(cls) -> builtins.str:\n '''If set in the construct's context, omits stack traces from metadata entries.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"DISABLE_STACK_TRACE_IN_METADATA\"))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"ERROR_METADATA_KEY\")\n def ERROR_METADATA_KEY(cls) -> builtins.str:\n '''Context type for error level messages.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"ERROR_METADATA_KEY\"))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"INFO_METADATA_KEY\")\n def INFO_METADATA_KEY(cls) -> builtins.str:\n '''Context type for info level messages.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"INFO_METADATA_KEY\"))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"WARNING_METADATA_KEY\")\n def WARNING_METADATA_KEY(cls) -> builtins.str:\n '''Context type for warning level messages.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"WARNING_METADATA_KEY\"))\n\n\n@jsii.data_type(\n jsii_type=\"constructs.ConstructOptions\",\n jsii_struct_bases=[],\n name_mapping={\"node_factory\": \"nodeFactory\"},\n)\nclass ConstructOptions:\n def __init__(self, *, node_factory: typing.Optional[\"INodeFactory\"] = None) -> None:\n '''Options for 
creating constructs.\n\n :param node_factory: A factory for attaching ``Node``s to the construct. Default: - the default ``Node`` is associated\n '''\n self._values: typing.Dict[str, typing.Any] = {}\n if node_factory is not None:\n self._values[\"node_factory\"] = node_factory\n\n @builtins.property\n def node_factory(self) -> typing.Optional[\"INodeFactory\"]:\n '''A factory for attaching ``Node``s to the construct.\n\n :default: - the default ``Node`` is associated\n '''\n result = self._values.get(\"node_factory\")\n return typing.cast(typing.Optional[\"INodeFactory\"], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"ConstructOptions(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n@jsii.enum(jsii_type=\"constructs.ConstructOrder\")\nclass ConstructOrder(enum.Enum):\n '''In what order to return constructs.'''\n\n PREORDER = \"PREORDER\"\n '''Depth-first, pre-order.'''\n POSTORDER = \"POSTORDER\"\n '''Depth-first, post-order (leaf nodes first).'''\n\n\n@jsii.data_type(\n jsii_type=\"constructs.Dependency\",\n jsii_struct_bases=[],\n name_mapping={\"source\": \"source\", \"target\": \"target\"},\n)\nclass Dependency:\n def __init__(self, *, source: \"IConstruct\", target: \"IConstruct\") -> None:\n '''A single dependency.\n\n :param source: Source the dependency.\n :param target: Target of the dependency.\n '''\n self._values: typing.Dict[str, typing.Any] = {\n \"source\": source,\n \"target\": target,\n }\n\n @builtins.property\n def source(self) -> \"IConstruct\":\n '''Source the dependency.'''\n result = self._values.get(\"source\")\n assert result is not None, \"Required property 'source' is missing\"\n return typing.cast(\"IConstruct\", result)\n\n @builtins.property\n def target(self) -> \"IConstruct\":\n 
'''Target of the dependency.'''\n result = self._values.get(\"target\")\n assert result is not None, \"Required property 'target' is missing\"\n return typing.cast(\"IConstruct\", result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"Dependency(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n@jsii.interface(jsii_type=\"constructs.IAspect\")\nclass IAspect(typing_extensions.Protocol):\n '''Represents an Aspect.'''\n\n @jsii.member(jsii_name=\"visit\")\n def visit(self, node: \"IConstruct\") -> None:\n '''All aspects can visit an IConstruct.\n\n :param node: -\n '''\n ...\n\n\nclass _IAspectProxy:\n '''Represents an Aspect.'''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.IAspect\"\n\n @jsii.member(jsii_name=\"visit\")\n def visit(self, node: \"IConstruct\") -> None:\n '''All aspects can visit an IConstruct.\n\n :param node: -\n '''\n return typing.cast(None, jsii.invoke(self, \"visit\", [node]))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, IAspect).__jsii_proxy_class__ = lambda : _IAspectProxy\n\n\n@jsii.interface(jsii_type=\"constructs.IConstruct\")\nclass IConstruct(typing_extensions.Protocol):\n '''Represents a construct.'''\n\n pass\n\n\nclass _IConstructProxy:\n '''Represents a construct.'''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.IConstruct\"\n pass\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, IConstruct).__jsii_proxy_class__ = lambda : _IConstructProxy\n\n\n@jsii.interface(jsii_type=\"constructs.INodeFactory\")\nclass INodeFactory(typing_extensions.Protocol):\n '''A factory for attaching ``Node``s to the construct.'''\n\n @jsii.member(jsii_name=\"createNode\")\n def 
create_node(\n self,\n host: \"Construct\",\n scope: IConstruct,\n id: builtins.str,\n ) -> \"Node\":\n '''Returns a new ``Node`` associated with ``host``.\n\n :param host: the associated construct.\n :param scope: the construct's scope (parent).\n :param id: the construct id.\n '''\n ...\n\n\nclass _INodeFactoryProxy:\n '''A factory for attaching ``Node``s to the construct.'''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.INodeFactory\"\n\n @jsii.member(jsii_name=\"createNode\")\n def create_node(\n self,\n host: \"Construct\",\n scope: IConstruct,\n id: builtins.str,\n ) -> \"Node\":\n '''Returns a new ``Node`` associated with ``host``.\n\n :param host: the associated construct.\n :param scope: the construct's scope (parent).\n :param id: the construct id.\n '''\n return typing.cast(\"Node\", jsii.invoke(self, \"createNode\", [host, scope, id]))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, INodeFactory).__jsii_proxy_class__ = lambda : _INodeFactoryProxy\n\n\n@jsii.interface(jsii_type=\"constructs.ISynthesisSession\")\nclass ISynthesisSession(typing_extensions.Protocol):\n '''Represents a single session of synthesis.\n\n Passed into ``construct.onSynthesize()`` methods.\n '''\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"outdir\")\n def outdir(self) -> builtins.str:\n '''The output directory for this synthesis session.'''\n ...\n\n\nclass _ISynthesisSessionProxy:\n '''Represents a single session of synthesis.\n\n Passed into ``construct.onSynthesize()`` methods.\n '''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.ISynthesisSession\"\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"outdir\")\n def outdir(self) -> builtins.str:\n '''The output directory for this synthesis session.'''\n return typing.cast(builtins.str, jsii.get(self, \"outdir\"))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the 
interface\ntyping.cast(typing.Any, ISynthesisSession).__jsii_proxy_class__ = lambda : _ISynthesisSessionProxy\n\n\n@jsii.interface(jsii_type=\"constructs.IValidation\")\nclass IValidation(typing_extensions.Protocol):\n '''Implement this interface in order for the construct to be able to validate itself.'''\n\n @jsii.member(jsii_name=\"validate\")\n def validate(self) -> typing.List[builtins.str]:\n '''Validate the current construct.\n\n This method can be implemented by derived constructs in order to perform\n validation logic. It is called on all constructs before synthesis.\n\n :return: An array of validation error messages, or an empty array if there the construct is valid.\n '''\n ...\n\n\nclass _IValidationProxy:\n '''Implement this interface in order for the construct to be able to validate itself.'''\n\n __jsii_type__: typing.ClassVar[str] = \"constructs.IValidation\"\n\n @jsii.member(jsii_name=\"validate\")\n def validate(self) -> typing.List[builtins.str]:\n '''Validate the current construct.\n\n This method can be implemented by derived constructs in order to perform\n validation logic. 
It is called on all constructs before synthesis.\n\n :return: An array of validation error messages, or an empty array if there the construct is valid.\n '''\n return typing.cast(typing.List[builtins.str], jsii.invoke(self, \"validate\", []))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, IValidation).__jsii_proxy_class__ = lambda : _IValidationProxy\n\n\n@jsii.data_type(\n jsii_type=\"constructs.MetadataEntry\",\n jsii_struct_bases=[],\n name_mapping={\"data\": \"data\", \"type\": \"type\", \"trace\": \"trace\"},\n)\nclass MetadataEntry:\n def __init__(\n self,\n *,\n data: typing.Any,\n type: builtins.str,\n trace: typing.Optional[typing.Sequence[builtins.str]] = None,\n ) -> None:\n '''An entry in the construct metadata table.\n\n :param data: The data.\n :param type: The metadata entry type.\n :param trace: Stack trace. Can be omitted by setting the context key ``ConstructMetadata.DISABLE_STACK_TRACE_IN_METADATA`` to 1. Default: - no trace information\n '''\n self._values: typing.Dict[str, typing.Any] = {\n \"data\": data,\n \"type\": type,\n }\n if trace is not None:\n self._values[\"trace\"] = trace\n\n @builtins.property\n def data(self) -> typing.Any:\n '''The data.'''\n result = self._values.get(\"data\")\n assert result is not None, \"Required property 'data' is missing\"\n return typing.cast(typing.Any, result)\n\n @builtins.property\n def type(self) -> builtins.str:\n '''The metadata entry type.'''\n result = self._values.get(\"type\")\n assert result is not None, \"Required property 'type' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def trace(self) -> typing.Optional[typing.List[builtins.str]]:\n '''Stack trace.\n\n Can be omitted by setting the context key\n ``ConstructMetadata.DISABLE_STACK_TRACE_IN_METADATA`` to 1.\n\n :default: - no trace information\n '''\n result = self._values.get(\"trace\")\n return 
typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"MetadataEntry(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\nclass Node(metaclass=jsii.JSIIMeta, jsii_type=\"constructs.Node\"):\n '''Represents the construct node in the scope tree.'''\n\n def __init__(self, host: \"Construct\", scope: IConstruct, id: builtins.str) -> None:\n '''\n :param host: -\n :param scope: -\n :param id: -\n '''\n jsii.create(Node, self, [host, scope, id])\n\n @jsii.member(jsii_name=\"of\") # type: ignore[misc]\n @builtins.classmethod\n def of(cls, construct: IConstruct) -> \"Node\":\n '''Returns the node associated with a construct.\n\n :param construct: the construct.\n '''\n return typing.cast(\"Node\", jsii.sinvoke(cls, \"of\", [construct]))\n\n @jsii.member(jsii_name=\"addDependency\")\n def add_dependency(self, *dependencies: IConstruct) -> None:\n '''Add an ordering dependency on another Construct.\n\n All constructs in the dependency's scope will be deployed before any\n construct in this construct's scope.\n\n :param dependencies: -\n '''\n return typing.cast(None, jsii.invoke(self, \"addDependency\", [*dependencies]))\n\n @jsii.member(jsii_name=\"addError\")\n def add_error(self, message: builtins.str) -> None:\n '''Adds an { \"error\": } metadata entry to this construct.\n\n The toolkit will fail synthesis when errors are reported.\n\n :param message: The error message.\n '''\n return typing.cast(None, jsii.invoke(self, \"addError\", [message]))\n\n @jsii.member(jsii_name=\"addInfo\")\n def add_info(self, message: builtins.str) -> None:\n '''Adds a { \"info\": } metadata entry to this construct.\n\n The toolkit will display the info message when apps are synthesized.\n\n :param 
message: The info message.\n '''\n return typing.cast(None, jsii.invoke(self, \"addInfo\", [message]))\n\n @jsii.member(jsii_name=\"addMetadata\")\n def add_metadata(\n self,\n type: builtins.str,\n data: typing.Any,\n from_function: typing.Any = None,\n ) -> None:\n '''Adds a metadata entry to this construct.\n\n Entries are arbitrary values and will also include a stack trace to allow tracing back to\n the code location for when the entry was added. It can be used, for example, to include source\n mapping in CloudFormation templates to improve diagnostics.\n\n :param type: a string denoting the type of metadata.\n :param data: the value of the metadata (can be a Token). If null/undefined, metadata will not be added.\n :param from_function: a function under which to restrict the metadata entry's stack trace (defaults to this.addMetadata).\n '''\n return typing.cast(None, jsii.invoke(self, \"addMetadata\", [type, data, from_function]))\n\n @jsii.member(jsii_name=\"addValidation\")\n def add_validation(self, validation: IValidation) -> None:\n '''Adds a validation to this construct.\n\n When ``node.validate()`` is called, the ``validate()`` method will be called on\n all validations and all errors will be returned.\n\n :param validation: -\n '''\n return typing.cast(None, jsii.invoke(self, \"addValidation\", [validation]))\n\n @jsii.member(jsii_name=\"addWarning\")\n def add_warning(self, message: builtins.str) -> None:\n '''Adds a { \"warning\": } metadata entry to this construct.\n\n The toolkit will display the warning when an app is synthesized, or fail\n if run in --strict mode.\n\n :param message: The warning message.\n '''\n return typing.cast(None, jsii.invoke(self, \"addWarning\", [message]))\n\n @jsii.member(jsii_name=\"applyAspect\")\n def apply_aspect(self, aspect: IAspect) -> None:\n '''Applies the aspect to this Constructs node.\n\n :param aspect: -\n '''\n return typing.cast(None, jsii.invoke(self, \"applyAspect\", [aspect]))\n\n 
@jsii.member(jsii_name=\"findAll\")\n def find_all(\n self,\n order: typing.Optional[ConstructOrder] = None,\n ) -> typing.List[IConstruct]:\n '''Return this construct and all of its children in the given order.\n\n :param order: -\n '''\n return typing.cast(typing.List[IConstruct], jsii.invoke(self, \"findAll\", [order]))\n\n @jsii.member(jsii_name=\"findChild\")\n def find_child(self, id: builtins.str) -> IConstruct:\n '''Return a direct child by id.\n\n Throws an error if the child is not found.\n\n :param id: Identifier of direct child.\n\n :return: Child with the given id.\n '''\n return typing.cast(IConstruct, jsii.invoke(self, \"findChild\", [id]))\n\n @jsii.member(jsii_name=\"prepare\")\n def prepare(self) -> None:\n '''Invokes \"prepare\" on all constructs (depth-first, post-order) in the tree under ``node``.'''\n return typing.cast(None, jsii.invoke(self, \"prepare\", []))\n\n @jsii.member(jsii_name=\"setContext\")\n def set_context(self, key: builtins.str, value: typing.Any) -> None:\n '''This can be used to set contextual values.\n\n Context must be set before any children are added, since children may consult context info during construction.\n If the key already exists, it will be overridden.\n\n :param key: The context key.\n :param value: The context value.\n '''\n return typing.cast(None, jsii.invoke(self, \"setContext\", [key, value]))\n\n @jsii.member(jsii_name=\"synthesize\")\n def synthesize(\n self,\n *,\n outdir: builtins.str,\n session_context: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,\n skip_validation: typing.Optional[builtins.bool] = None,\n ) -> None:\n '''Synthesizes a CloudAssembly from a construct tree.\n\n :param outdir: The output directory into which to synthesize the cloud assembly. Default: - creates a temporary directory\n :param session_context: Additional context passed into the synthesis session object when ``construct.synth`` is called. 
Default: - no additional context is passed to ``onSynthesize``\n :param skip_validation: Whether synthesis should skip the validation phase. Default: false\n '''\n options = SynthesisOptions(\n outdir=outdir,\n session_context=session_context,\n skip_validation=skip_validation,\n )\n\n return typing.cast(None, jsii.invoke(self, \"synthesize\", [options]))\n\n @jsii.member(jsii_name=\"tryFindChild\")\n def try_find_child(self, id: builtins.str) -> typing.Optional[IConstruct]:\n '''Return a direct child by id, or undefined.\n\n :param id: Identifier of direct child.\n\n :return: the child if found, or undefined\n '''\n return typing.cast(typing.Optional[IConstruct], jsii.invoke(self, \"tryFindChild\", [id]))\n\n @jsii.member(jsii_name=\"tryGetContext\")\n def try_get_context(self, key: builtins.str) -> typing.Any:\n '''Retrieves a value from tree context.\n\n Context is usually initialized at the root, but can be overridden at any point in the tree.\n\n :param key: The context key.\n\n :return: The context value or ``undefined`` if there is no context value for thie key.\n '''\n return typing.cast(typing.Any, jsii.invoke(self, \"tryGetContext\", [key]))\n\n @jsii.member(jsii_name=\"tryRemoveChild\")\n def try_remove_child(self, child_name: builtins.str) -> builtins.bool:\n '''(experimental) Remove the child with the given name, if present.\n\n :param child_name: -\n\n :return: Whether a child with the given name was deleted.\n\n :stability: experimental\n '''\n return typing.cast(builtins.bool, jsii.invoke(self, \"tryRemoveChild\", [child_name]))\n\n @jsii.member(jsii_name=\"validate\")\n def validate(self) -> typing.List[\"ValidationError\"]:\n '''Validates tree (depth-first, pre-order) and returns the list of all errors.\n\n An empty list indicates that there are no errors.\n '''\n return typing.cast(typing.List[\"ValidationError\"], jsii.invoke(self, \"validate\", []))\n\n @jsii.python.classproperty # type: ignore[misc]\n @jsii.member(jsii_name=\"PATH_SEP\")\n def 
PATH_SEP(cls) -> builtins.str:\n '''Separator used to delimit construct path components.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"PATH_SEP\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"addr\")\n def addr(self) -> builtins.str:\n '''Returns an opaque tree-unique address for this construct.\n\n Addresses are 42 characters hexadecimal strings. They begin with \"c8\"\n followed by 40 lowercase hexadecimal characters (0-9a-f).\n\n Addresses are calculated using a SHA-1 of the components of the construct\n path.\n\n To enable refactorings of construct trees, constructs with the ID ``Default``\n will be excluded from the calculation. In those cases constructs in the\n same tree may have the same addreess.\n\n Example::\n\n # Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826\n c83a2846e506bcc5f10682b564084bca2d275709ee\n '''\n return typing.cast(builtins.str, jsii.get(self, \"addr\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"children\")\n def children(self) -> typing.List[IConstruct]:\n '''All direct children of this construct.'''\n return typing.cast(typing.List[IConstruct], jsii.get(self, \"children\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"dependencies\")\n def dependencies(self) -> typing.List[Dependency]:\n '''Return all dependencies registered on this node or any of its children.'''\n return typing.cast(typing.List[Dependency], jsii.get(self, \"dependencies\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"id\")\n def id(self) -> builtins.str:\n '''The id of this construct within the current scope.\n\n This is a a scope-unique id. 
To obtain an app-unique id for this construct, use ``uniqueId``.\n '''\n return typing.cast(builtins.str, jsii.get(self, \"id\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"locked\")\n def locked(self) -> builtins.bool:\n '''Returns true if this construct or the scopes in which it is defined are locked.'''\n return typing.cast(builtins.bool, jsii.get(self, \"locked\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"metadata\")\n def metadata(self) -> typing.List[MetadataEntry]:\n '''An immutable array of metadata objects associated with this construct.\n\n This can be used, for example, to implement support for deprecation notices, source mapping, etc.\n '''\n return typing.cast(typing.List[MetadataEntry], jsii.get(self, \"metadata\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"path\")\n def path(self) -> builtins.str:\n '''The full, absolute path of this construct in the tree.\n\n Components are separated by '/'.\n '''\n return typing.cast(builtins.str, jsii.get(self, \"path\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"root\")\n def root(self) -> IConstruct:\n '''Returns the root of the construct tree.\n\n :return: The root of the construct tree.\n '''\n return typing.cast(IConstruct, jsii.get(self, \"root\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"scopes\")\n def scopes(self) -> typing.List[IConstruct]:\n '''All parent scopes of this construct.\n\n :return:\n\n a list of parent scopes. 
The last element in the list will always\n be the current construct and the first element will be the root of the\n tree.\n '''\n return typing.cast(typing.List[IConstruct], jsii.get(self, \"scopes\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"uniqueId\")\n def unique_id(self) -> builtins.str:\n '''(deprecated) A tree-global unique alphanumeric identifier for this construct.\n\n Includes\n all components of the tree.\n\n :deprecated:\n\n please avoid using this property and use ``addr`` to form unique names.\n This algorithm uses MD5, which is not FIPS-complient and also excludes the\n identity of the root construct from the calculation.\n\n :stability: deprecated\n '''\n return typing.cast(builtins.str, jsii.get(self, \"uniqueId\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"scope\")\n def scope(self) -> typing.Optional[IConstruct]:\n '''Returns the scope in which this construct is defined.\n\n The value is ``undefined`` at the root of the construct scope tree.\n '''\n return typing.cast(typing.Optional[IConstruct], jsii.get(self, \"scope\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"defaultChild\")\n def default_child(self) -> typing.Optional[IConstruct]:\n '''Returns the child construct that has the id ``Default`` or ``Resource\"``.\n\n This is usually the construct that provides the bulk of the underlying functionality.\n Useful for modifications of the underlying construct that are not available at the higher levels.\n Override the defaultChild property.\n\n This should only be used in the cases where the correct\n default child is not named 'Resource' or 'Default' as it\n should be.\n\n If you set this to undefined, the default behavior of finding\n the child named 'Resource' or 'Default' will be used.\n\n :return: a construct or undefined if there is no default child\n\n :throws: if there is more than one child\n '''\n return typing.cast(typing.Optional[IConstruct], 
jsii.get(self, \"defaultChild\"))\n\n @default_child.setter\n def default_child(self, value: typing.Optional[IConstruct]) -> None:\n jsii.set(self, \"defaultChild\", value)\n\n\n@jsii.data_type(\n jsii_type=\"constructs.SynthesisOptions\",\n jsii_struct_bases=[],\n name_mapping={\n \"outdir\": \"outdir\",\n \"session_context\": \"sessionContext\",\n \"skip_validation\": \"skipValidation\",\n },\n)\nclass SynthesisOptions:\n def __init__(\n self,\n *,\n outdir: builtins.str,\n session_context: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,\n skip_validation: typing.Optional[builtins.bool] = None,\n ) -> None:\n '''Options for synthesis.\n\n :param outdir: The output directory into which to synthesize the cloud assembly. Default: - creates a temporary directory\n :param session_context: Additional context passed into the synthesis session object when ``construct.synth`` is called. Default: - no additional context is passed to ``onSynthesize``\n :param skip_validation: Whether synthesis should skip the validation phase. 
Default: false\n '''\n self._values: typing.Dict[str, typing.Any] = {\n \"outdir\": outdir,\n }\n if session_context is not None:\n self._values[\"session_context\"] = session_context\n if skip_validation is not None:\n self._values[\"skip_validation\"] = skip_validation\n\n @builtins.property\n def outdir(self) -> builtins.str:\n '''The output directory into which to synthesize the cloud assembly.\n\n :default: - creates a temporary directory\n '''\n result = self._values.get(\"outdir\")\n assert result is not None, \"Required property 'outdir' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def session_context(\n self,\n ) -> typing.Optional[typing.Mapping[builtins.str, typing.Any]]:\n '''Additional context passed into the synthesis session object when ``construct.synth`` is called.\n\n :default: - no additional context is passed to ``onSynthesize``\n '''\n result = self._values.get(\"session_context\")\n return typing.cast(typing.Optional[typing.Mapping[builtins.str, typing.Any]], result)\n\n @builtins.property\n def skip_validation(self) -> typing.Optional[builtins.bool]:\n '''Whether synthesis should skip the validation phase.\n\n :default: false\n '''\n result = self._values.get(\"skip_validation\")\n return typing.cast(typing.Optional[builtins.bool], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"SynthesisOptions(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n@jsii.data_type(\n jsii_type=\"constructs.ValidationError\",\n jsii_struct_bases=[],\n name_mapping={\"message\": \"message\", \"source\": \"source\"},\n)\nclass ValidationError:\n def __init__(self, *, message: builtins.str, source: \"Construct\") -> None:\n '''An error returned during the validation phase.\n\n :param 
message: The error message.\n :param source: The construct which emitted the error.\n '''\n self._values: typing.Dict[str, typing.Any] = {\n \"message\": message,\n \"source\": source,\n }\n\n @builtins.property\n def message(self) -> builtins.str:\n '''The error message.'''\n result = self._values.get(\"message\")\n assert result is not None, \"Required property 'message' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def source(self) -> \"Construct\":\n '''The construct which emitted the error.'''\n result = self._values.get(\"source\")\n assert result is not None, \"Required property 'source' is missing\"\n return typing.cast(\"Construct\", result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"ValidationError(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n@jsii.implements(IConstruct)\nclass Construct(metaclass=jsii.JSIIMeta, jsii_type=\"constructs.Construct\"):\n '''Represents the building block of the construct graph.\n\n All constructs besides the root construct must be created within the scope of\n another construct.\n '''\n\n def __init__(\n self,\n scope: \"Construct\",\n id: builtins.str,\n *,\n node_factory: typing.Optional[INodeFactory] = None,\n ) -> None:\n '''Creates a new construct node.\n\n :param scope: The scope in which to define this construct.\n :param id: The scoped construct ID. Must be unique amongst siblings. If the ID includes a path separator (``/``), then it will be replaced by double dash ``--``.\n :param node_factory: A factory for attaching ``Node``s to the construct. 
Default: - the default ``Node`` is associated\n '''\n options = ConstructOptions(node_factory=node_factory)\n\n jsii.create(Construct, self, [scope, id, options])\n\n @jsii.member(jsii_name=\"onPrepare\")\n def _on_prepare(self) -> None:\n '''Perform final modifications before synthesis.\n\n This method can be implemented by derived constructs in order to perform\n final changes before synthesis. prepare() will be called after child\n constructs have been prepared.\n\n This is an advanced framework feature. Only use this if you\n understand the implications.\n '''\n return typing.cast(None, jsii.invoke(self, \"onPrepare\", []))\n\n @jsii.member(jsii_name=\"onSynthesize\")\n def _on_synthesize(self, session: ISynthesisSession) -> None:\n '''Allows this construct to emit artifacts into the cloud assembly during synthesis.\n\n This method is usually implemented by framework-level constructs such as ``Stack`` and ``Asset``\n as they participate in synthesizing the cloud assembly.\n\n :param session: The synthesis session.\n '''\n return typing.cast(None, jsii.invoke(self, \"onSynthesize\", [session]))\n\n @jsii.member(jsii_name=\"onValidate\")\n def _on_validate(self) -> typing.List[builtins.str]:\n '''(deprecated) Validate the current construct.\n\n This method can be implemented by derived constructs in order to perform\n validation logic. 
It is called on all constructs before synthesis.\n\n :return: An array of validation error messages, or an empty array if there the construct is valid.\n\n :deprecated:\n\n use ``Node.addValidation()`` to subscribe validation functions on this construct\n instead of overriding this method.\n\n :stability: deprecated\n '''\n return typing.cast(typing.List[builtins.str], jsii.invoke(self, \"onValidate\", []))\n\n @jsii.member(jsii_name=\"toString\")\n def to_string(self) -> builtins.str:\n '''Returns a string representation of this construct.'''\n return typing.cast(builtins.str, jsii.invoke(self, \"toString\", []))\n\n\n__all__ = [\n \"Construct\",\n \"ConstructMetadata\",\n \"ConstructOptions\",\n \"ConstructOrder\",\n \"Dependency\",\n \"IAspect\",\n \"IConstruct\",\n \"INodeFactory\",\n \"ISynthesisSession\",\n \"IValidation\",\n \"MetadataEntry\",\n \"Node\",\n \"SynthesisOptions\",\n \"ValidationError\",\n]\n\npublication.publish()\n","sub_path":".env/lib/python3.8/site-packages/constructs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":35031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"81149720","text":"\"\"\"\nAuthor - Noah Kruss\n\nFile that contains the analysis class with the functions for analysising the\naural metamaterial system\n\"\"\"\n\n#---------------IMPORT STATEMENTS------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport time\nimport os\nimport shutil\nimport statistics as stats\nimport scipy.signal\nimport scipy.special\nfrom scipy.stats import norm\nimport pandas as pd\nimport pywt\n\nimport gsd.pygsd as GSD_pygsd\nimport gsd.hoomd as GSD_hoomd\n\nfrom scipy.optimize import curve_fit\nfrom astropy.modeling import models, fitting\n\n#---------------Helper Function------------------------------\ndef gauss_function(x, a, x0, sigma):\n return a*np.exp(-(x-x0)**2/(2*sigma**2))\n\n#---------------Analysis Class-------------------------------\nclass Aural_Analysis():\n\n def __init__(self):\n\n self.particle_data = None\n\n self.dt = None\n self.m = None\n self.N = None\n\n def read_data(self, fname: str):\n \"\"\"\n Function to read the recorded data from a simulation from a gsd file\n\n Inputs:\n fname - (str) name of gsd file containing simulation data\n \"\"\"\n\n #open data file\n f = GSD_pygsd.GSDFile(open(fname, 'rb'))\n t = GSD_hoomd.HOOMDTrajectory(f)\n\n self.particle_data = t\n self.N = len(t[0].particles.position)\n\n def fourier_plot(self, x_data: list, y_data: list, store_loc = None, plot_title = \"Fourier_plot\"):\n \"\"\"\n Function for ploting and returning the fourier data of a given inputed data set\n \"\"\"\n\n x_data = np.array(x_data)\n y_data = np.array(y_data)\n\n fft = np.fft.fft(y_data)\n fft[0] = 0\n\n N = len(x_data)\n T = x_data[1] - x_data[0] # sampling interval\n freq = np.fft.fftfreq(N, d=T)\n\n plt.plot(abs(freq), abs(fft.real))\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Frequency [1 / wave-length]\")\n\n #find peak of graph\n decreasing = True\n peak = None\n for value_i in range(len(x_data)):\n amp = fft[value_i]\n frequency = freq[value_i]\n\n 
if peak == None:\n peak = (frequency, amp)\n elif decreasing == True and peak[1] < amp:\n decreasing = False\n elif decreasing == True:\n peak = (frequency, amp)\n elif decreasing == False and amp > peak[1]:\n peak = (frequency, amp)\n\n plt.title(f\"{plot_title}\\nPeak at {peak[0]} Hz\")\n\n plt.savefig(plot_title)\n if store_loc != None:\n shutil.move(f\"{plot_title}.png\", store_loc)\n plt.clf()\n\n return (abs(freq), abs(fft.real))\n\n def wave_packet(self, dt, store_loc = None, plot_title = \"Waterfall plot\", num_samples = 8, target_times = None):\n \"\"\"\n Function for generating and saving a waterfall plot of the system\n standing wave over the course of the simulation along with prefroming\n a fourier transform at each targeted time snapshot\n\n Inputs:\n dt - (float) the amount of time of a timestep\n store_loc - (str) path to directory to store generated plot (if left\n as None then waterfall plot will be shown not saved)\n plot_title - (str) name for the waterfall plot\n num_samples - (int) option for the number of time shapshots to display\n in the waterfall plot\n target_times - (list) list of specific timesteps to display in the\n waterfall plot. 
Will override the num_samples property\n if set to non-None\n \"\"\"\n\n target_index = []\n if target_times == None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n if time_step % sample_period == 0:\n target_index.append(time_step_i)\n\n target_index.append(len(self.particle_data) - 1)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n\n #-----------------------------------------------------\n #create and save waterfall plot of wave packet\n g1 = plt.figure(1)\n plt.xlabel('particle position')\n plt.ylabel('time')\n plt.yticks([])\n plt.title(plot_title, fontsize = 7)\n\n shift = 0\n packet_amplitude_list = []\n fit_amp_list = []\n fit_std_list = []\n for i in target_index:\n time_step = i * 500\n #print(f\"---{time_step}---\")\n p_list = []\n offsets = []\n abs_offsets = []\n amplitude = 0\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n abs_position = abs(position)\n\n #check for particle 0 looping around due to boundery conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n abs_position = abs(position)\n position += shift\n abs_position += shift\n\n #update amplitude\n if abs(position - shift) > amplitude:\n amplitude = abs(position - shift)\n\n p_list.append(p_index)\n offsets.append(position)\n abs_offsets.append(abs_position)\n packet_amplitude_list.append(amplitude)\n\n plt.plot(p_list, offsets, color = \"b\")\n # plt.plot(p_list, abs_offsets, color = \"g\")\n\n #shift -= .5\n shift -= .175\n\n if store_loc != None:\n plt.savefig(\"Gausian_plot\")\n 
shutil.move(\"Gausian_plot.png\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n df = pd.DataFrame(packet_amplitude_list)\n df.to_excel(\"amplitudes.xlsx\")\n shutil.move(\"amplitudes.xlsx\", store_loc)\n\n #get fourier plot and data for either sample\n fourier_data_list = []\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n gausian = []\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n #check for particle 0 looping around due to boundery conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n p_list.append(p_index)\n offsets.append(position)\n gausian.append(abs(position))\n\n #plt.plot(p_list, gausian)\n fourier_data = self.fourier_plot(p_list, offsets, store_loc = store_loc, plot_title = f\"Fourier_plot_time={int(time_step * .0001)}\")\n fourier_data_list.append(fourier_data)\n\n\n def gaussian_fitting(self, dt, store_loc = None, num_samples = 8, target_times = None):\n \"\"\"\n Function for generating and saving xlsx file of the gaussian fit\n parameters for the system standing wave over the course of the simulation\n\n Inputs:\n dt - (float) the amount of time of a timestep\n store_loc - (str) path to directory to store generated data file\n (if left as None then the plot will be shown not saved)\n num_samples - (int) option for the number of time shapshots to analyse\n target_times - (list) list of specific timesteps to analyse. 
Will\n override the num_samples property if set to non-None\n \"\"\"\n\n dimensionless_time = []\n w = 0.7853981633974483\n\n target_index = []\n if target_times == None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n if (time_step % sample_period == 0) and (time_step * dt * w > 200):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * dt * w)\n target_index.append(len(self.particle_data) - 1)\n dimensionless_time.append((len(self.particle_data) - 1) * 500 * w * dt)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w * dt)\n\n #-----------------------------------------------------\n g1 = plt.figure(1)\n\n shift = 0\n packet_amplitude_list = []\n fit_amp_list = []\n fit_std_list = []\n fit_cent_list = []\n unwrap_counter = 0\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n abs_offsets = []\n amplitude = 0\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n abs_position = abs(position)\n\n #check for particle 0 looping around due to boundery conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n abs_position = abs(position)\n position += shift\n abs_position += shift\n\n #update amplitude\n if abs(position - shift) > amplitude:\n amplitude = abs(position - shift)\n\n p_list.append(p_index)\n offsets.append(position)\n abs_offsets.append(abs_position)\n packet_amplitude_list.append(amplitude)\n\n\n #setting up gausian fit\n gausian_left_index = 0\n gausian_right_index = 0\n if abs_offsets[0] == 
shift:\n for i in range(0, len(abs_offsets)):\n if abs_offsets[i] - shift != 0:\n gausian_left_index = i\n break\n for i in range(len(abs_offsets) - 1, 0, -1):\n if abs_offsets[i] - shift != 0:\n gausian_right_index = i\n break\n else:\n count = 0\n for i in range(0, len(abs_offsets)):\n if abs_offsets[i] - shift == 0:\n count += 1\n else:\n gausian_right_index += 1\n count = 0\n if count >= 20:\n break\n count = 0\n gausian_left_index = len(abs_offsets) - 1\n for i in range(len(abs_offsets) - 1, 0, -1):\n if abs_offsets[i] - shift == 0:\n count += 1\n else:\n gausian_left_index -= 1\n count = 0\n if count >= 20:\n break\n\n #create a list of the amplitudes of the gausian without the periodic boundary conditions\n gausian_removed_periodic = []\n if gausian_left_index < gausian_right_index:\n for i in range(gausian_left_index, gausian_right_index, 1):\n gausian_removed_periodic.append(abs_offsets[i] - shift)\n sigma1 = len(gausian_removed_periodic) / 4\n else:\n for i in range(gausian_left_index, len(abs_offsets), 1):\n gausian_removed_periodic.append(abs_offsets[i] - shift)\n for i in range(gausian_right_index):\n gausian_removed_periodic.append(abs_offsets[i] - shift)\n sigma1 = len(gausian_removed_periodic) / 4\n\n while(len(gausian_removed_periodic) != len(abs_offsets)):\n gausian_removed_periodic.append(0)\n\n amp1 = max(gausian_removed_periodic)\n cen1 = gausian_removed_periodic.index(amp1)\n\n popt_gauss, pcov_gauss = scipy.optimize.curve_fit(gauss_function, p_list, gausian_removed_periodic, p0=[amp1, cen1, sigma1])\n fit_amp_list.append(popt_gauss[0])\n fit_std_list.append(popt_gauss[2])\n\n fit_center = abs_offsets.index(amp1 + shift) + (unwrap_counter * self.N)\n if len(fit_cent_list) > 4:\n if(fit_cent_list[-4] > fit_center and\n fit_cent_list[-3] > fit_center and\n fit_cent_list[-2] > fit_center and\n fit_cent_list[-1] > fit_center):\n #print(f\"wrapping on {abs_offsets.index(amp1 + shift) + (unwrap_counter * self.N)}, prev = {fit_cent_list[-2]}, 
unwrap_counter = {unwrap_counter}\")\n unwrap_counter += 1;\n fit_cent_list.append(abs_offsets.index(amp1 + shift) + (unwrap_counter * self.N))\n\n shift -= .175\n\n if store_loc != None:\n df = pd.DataFrame({\"Dimensionless Time\": dimensionless_time, \"Amplitude\": fit_amp_list, \"STD\": fit_std_list, \"Center\": fit_cent_list})\n df.to_excel(\"gaussian_fit_parameters.xlsx\")\n shutil.move(\"gaussian_fit_parameters.xlsx\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n #-------------Collect Fit Error------------------------\n\n #get fit parameter errors\n amp_perc_error_list = []\n std_perc_error_list = []\n for i in range(len(fit_amp_list)):\n amp_perc_error_list.append((fit_amp_list[i] - fit_amp_list[0]) / fit_amp_list[0])\n std_perc_error_list.append((fit_std_list[i] - fit_std_list[0]) / fit_std_list[0] * 100)\n\n #create error plots of fit parameters\n g1 = plt.figure(1)\n plt.xlabel('Time')\n plt.ylabel('Fit Amplitude Factor')\n plt.title(\"Gaussian Fit Amplitude Factor\")\n plt.plot(dimensionless_time, amp_perc_error_list)\n if store_loc != None:\n plt.savefig(\"Gaussian_Fit_Amp\")\n shutil.move(\"Gaussian_Fit_Amp.png\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n g1 = plt.figure(1)\n plt.xlabel('Time')\n plt.ylabel('Fit STD Percent Error')\n plt.title(\"Gaussian Fit STD Error\")\n plt.plot(dimensionless_time, std_perc_error_list)\n if store_loc != None:\n plt.savefig(\"Gaussian_Fit_STD\")\n shutil.move(\"Gaussian_Fit_STD.png\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n g1 = plt.figure(1)\n plt.xlabel('Time')\n plt.ylabel('Fit Center Position')\n plt.title(\"Gaussian Fit Center Position\")\n plt.plot(dimensionless_time, fit_cent_list)\n if store_loc != None:\n plt.savefig(\"Gaussian_Fit_Center\")\n shutil.move(\"Gaussian_Fit_Center.png\", store_loc)\n else:\n plt.show()\n plt.clf()\n\n m, b = np.polyfit(dimensionless_time, fit_cent_list, 1)\n return(dimensionless_time, fit_cent_list, m, b, amp_perc_error_list)\n\n def peak_error(self, dt, 
store_loc = None, plot_title = \"Mean Peak Error\", num_samples = 10, target_times = None):\n \"\"\"\n Function for calculating the error in the position of the peaks of the\n fourier transform of the system standing wave over the course of the\n simulation\n\n Inputs:\n dt - (float) the amount of time of a timestep\n store_loc - (str) path to directory to store generated data file\n (if left as None then the plot will be shown not saved)\n plot_title - (str) name for the generated plot\n num_samples - (int) option for the number of time shapshots to analyse\n target_times - (list) list of specific timesteps to analyse. Will\n override the num_samples property if set to non-None\n \"\"\"\n\n dimensionless_time = []\n w = 0.7853981633974483\n\n target_index = []\n if target_times == None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n if time_step % sample_period == 0:\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * dt * w)\n target_index.append(len(self.particle_data) - 1)\n dimensionless_time.append((len(self.particle_data) - 1) * 500 * w * dt)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w * dt)\n\n #get fourier plot and data for either sample\n fourier_data_list = []\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n gausian = []\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n #check for particle 0 looping around due to boundery conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - 
equilibriam_pos\n\n p_list.append(p_index)\n offsets.append(position)\n gausian.append(abs(position))\n\n #plt.plot(p_list, gausian)\n fourier_data = self.fourier_plot(p_list, offsets)\n fourier_data_list.append(fourier_data)\n\n #-----------------------------------------------------------------------\n mean_peak_error_list = []\n peaks_initial = scipy.signal.find_peaks(fourier_data_list[0][1], height=.01)\n peak_pos_initial = fourier_data_list[0][0][peaks_initial[0]]\n shift = 15\n for fourier_data in fourier_data_list:\n diff_sum = 0\n\n for i in range(len(peak_pos_initial)):\n target_peak_index = peaks_initial[0][i]\n target_zone = fourier_data[1][target_peak_index - shift: target_peak_index + shift]\n #print(\"target zone = \", target_zone)\n peak = np.amax(target_zone)\n #print(\"peak = \", peak)\n peak_index = np.where(target_zone == peak)\n target_freq_zone = fourier_data[0][target_peak_index - shift:]\n peak_pos = target_freq_zone[peak_index[0][0]]\n #print(peak_pos_initial[i], peak_pos)\n\n diff_sum += (peak_pos_initial[i] - peak_pos) ** 2\n mean_peak_error_list.append(math.sqrt(diff_sum) / len(peaks_initial))\n #print()\n\n error_plot_title = \"Fourier Peaks Mean Error - (over course of Simulation)\"\n plt.plot(dimensionless_time, mean_peak_error_list)\n plt.title(error_plot_title)\n plt.xlabel(\"Dimensionless Time\")\n plt.ylabel(\"Error\")\n plt.savefig(error_plot_title)\n if store_loc != None:\n shutil.move(f\"{error_plot_title}.png\", store_loc)\n plt.clf()\n\n #save peak error to spreadsheet\n df = pd.DataFrame(mean_peak_error_list)\n df.to_excel(\"Peak_pos.xlsx\")\n shutil.move(\"Peak_pos.xlsx\", store_loc)\n\n def normalized_error(self, dt, store_loc = None, plot_title = \"Normalized Amplitude Error\", num_samples = 1000, target_times = None):\n \"\"\"\n Function for calculating the error in the amplitude of the system\n standing wave over the course of the simulation\n\n Inputs:\n dt - (float) the amount of time of a timestep\n store_loc - (str) 
path to directory to store generated data file\n (if left as None then the plot will be shown not saved)\n plot_title - (str) name for the generated plot\n num_samples - (int) option for the number of time shapshots to analyse\n target_times - (list) list of specific timesteps to analyse. Will\n override the num_samples property if set to non-None\n \"\"\"\n\n dimensionless_time = []\n w = 0.7853981633974483\n\n target_index = []\n if target_times == None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n #if time_step % sample_period == 0:\n if (time_step % sample_period == 0) and (time_step * dt * w > 200):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * dt * w)\n target_index.append(len(self.particle_data) - 1)\n dimensionless_time.append((len(self.particle_data) - 1) * 500 * w * dt)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w * dt)\n\n #get fourier plot and data for either sample\n fourier_data_list = []\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n gausian = []\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n #check for particle 0 looping around due to boundery conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n p_list.append(p_index)\n offsets.append(position)\n gausian.append(abs(position))\n\n #plt.plot(p_list, gausian)\n fourier_data = self.fourier_plot(p_list, offsets)\n fourier_data_list.append(fourier_data)\n\n 
#-----------------------------------------------------------------------\n fractional_error_list = []\n initial_data = fourier_data_list[0]\n #initial_data = fourier_data_list[16]\n max_amp = max(initial_data[1])\n for fourier_data_i in range(len(fourier_data_list)):\n fourier_data = fourier_data_list[fourier_data_i]\n diff_sum = 0\n timestep = fourier_data_i * 500\n for i in range(len(fourier_data[0])):\n diff_sum += abs(initial_data[1][i] - fourier_data[1][i]) / max_amp\n\n # amp = fourier_data[1][i] / (math.e ** (0.0025 * timestep))\n # diff_sum += abs(initial_data[1][i] - amp) / max_amp\n\n fractional_error_list.append(diff_sum / len(fourier_data[0]))\n\n m, b = np.polyfit(dimensionless_time, fractional_error_list, 1)\n fit = []\n for time in dimensionless_time:\n fit.append(m*time + b)\n print(m)\n plt.plot(dimensionless_time, fit)\n plt.legend([f\"m = {m}, b = {b}\"])\n\n error_plot_title = \"Foureir Amplitude Normalized Difference - (over course of Simulation)\"\n plt.plot(dimensionless_time, fractional_error_list)\n plt.title(plot_title, fontsize = 7)\n plt.savefig(error_plot_title)\n if store_loc != None:\n shutil.move(f\"{error_plot_title}.png\", store_loc)\n\n df = pd.DataFrame(fractional_error_list)\n df.to_excel(\"normalized_RMSE.xlsx\")\n shutil.move(\"normalized_RMSE.xlsx\", store_loc)\n plt.clf()\n\n\n return (fractional_error_list, dimensionless_time)\n\n def integrety_test(self, dt, num_samples = 10, target_times = None):\n\n dimensionless_time = []\n w = 0.7853981633974483\n\n #pull out the indexes of the target times\n target_index = []\n if target_times == None:\n #select the time to plot the data at\n sample_period = (100000000) / num_samples\n\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n if time_step % sample_period == 0:\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w)\n target_index.append(len(self.particle_data) - 1)\n dimensionless_time.append((len(self.particle_data) 
- 1) * 500 * w)\n else:\n for time_step_i in range(len(self.particle_data)):\n time_step = time_step_i * 500\n time = time_step * dt\n if (time in target_times):\n target_index.append(time_step_i)\n dimensionless_time.append(time_step * w)\n\n #get fourier plot and data for either sample\n fourier_data_list = []\n for i in target_index:\n time_step = i * 500\n p_list = []\n offsets = []\n gausian = []\n for p_index in range(self.N):\n equilibriam_pos = (-self.N / 2) + p_index\n position = self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n #check for particle 0 looping around due to boundery conditions\n if p_index == 0:\n if self.particle_data[i].particles.position[p_index][0] > 0:\n position = -self.particle_data[i].particles.position[p_index][0] - equilibriam_pos\n\n p_list.append(p_index)\n offsets.append(position)\n gausian.append(abs(position))\n\n fourier_data = self.fourier_plot(p_list, offsets)\n fourier_data_list.append(fourier_data)\n\n #-----------------------------------------------------------------------\n #set up for peak analysis\n mean_peak_error_list = []\n peaks_initial = scipy.signal.find_peaks(fourier_data_list[0][1], height=.01)\n peak_pos_initial = fourier_data_list[0][0][peaks_initial[0]]\n shift = 15\n\n #set up for amplitude analysis\n mean_applitude_error_list = []\n initial_data = fourier_data_list[0]\n\n for fourier_data in fourier_data_list:\n peak_diff_sum_sqrd = 0\n amp_diff_sum_sqrd = 0\n\n #sum sqrd errors on peak positions for current timeframe\n for i in range(len(peak_pos_initial)):\n target_peak_index = peaks_initial[0][i]\n target_zone = fourier_data[1][target_peak_index - shift: target_peak_index + shift]\n peak = np.amax(target_zone)\n peak_index = np.where(target_zone == peak)\n target_freq_zone = fourier_data[0][target_peak_index - shift:]\n peak_pos = target_freq_zone[peak_index[0][0]]\n\n peak_diff_sum_sqrd += (peak_pos_initial[i] - peak_pos) ** 2\n\n #sum sqrd errors on fourier amplitude for 
current timeframe\n for i in range(len(fourier_data[0])):\n amp_diff_sum_sqrd += ((initial_data[1][i] - fourier_data[1][i]) ** 2)\n\n #add difference values to apropriate lists\n mean_applitude_error_list.append(math.sqrt(amp_diff_sum_sqrd / len(fourier_data[0])))\n mean_peak_error_list.append(math.sqrt(peak_diff_sum_sqrd) / len(peaks_initial))\n\n return (mean_applitude_error_list, mean_peak_error_list, dimensionless_time)\n","sub_path":"analysis_files/aural_analysis.py","file_name":"aural_analysis.py","file_ext":"py","file_size_in_byte":28798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"374005518","text":"STR2NUM = dict(\n zero=0,\n one=1,\n two=2,\n three=3,\n four=4,\n five=5,\n six=6,\n\tseven=7,\n \teight=8,\n \tnine=9\n )\n\ndef solution(s):\n s = s.lower()\n for literal, num in STR2NUM.items():\n s = s.replace(literal, str(num))\n return int(s)\n\nif __name__ == '__main__':\n solution(\"one4seveneight\")","sub_path":"kakao-2021-internship/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"84123380","text":"OFFRE_CHOICES = (\r\n ('neuf', 'Neuf'),\r\n ('louer', 'A louer'),\r\n ('vente', 'En vente'),\r\n)\r\n\r\nTYPE_CHOICES = (\r\n ('appartement', 'Appartement'),\r\n ('studio', 'Studio'),\r\n ('villas', 'Villas'),\r\n ('duplex', 'Duplex'),\r\n)\r\n\r\nNATURE_CHOICES = (\r\n ('ACD', 'ACD'),\r\n ('pas ACD', 'Non ACD'),\r\n)\r\n\r\nVILLE_CHOICES = (\r\n (\"Abengourou\", \"Abengourou\"),\r\n (\"Abidjan\", \"Abidjan\"),\r\n (\"Aboisso\", \"Aboisso\"),\r\n (\"Abongoua\", \"Abongoua\"),\r\n (\"Adaou\", \"Adaou\"),\r\n (\"Adiaké\", \"Adiaké\"),\r\n (\"Adjouan\", \"Adjouan\"),\r\n (\"Adzopé\", \"Adzopé\"),\r\n (\"Agbaou\", \"Agbaou\"),\r\n (\"Agboville\", \"Agboville\"),\r\n (\"Agnibilékrou\", \"Agnibilékrou\"),\r\n (\"Ahouanou\", \"Ahouanou\"),\r\n (\"Ahoutoué\", \"Ahoutoué\"),\r\n (\"Akouédo\", \"Akouédo\"),\r\n (\"Akoupé\", \"Akoupé\"),\r\n (\"Alépé\", \"Alépé\"),\r\n (\"Alounamouénou\", \"Alounamouénou\"),\r\n (\"Ananda (Daoukro)\", \"Ananda (Daoukro)\"), \r\n (\"Ananda (Daoukro)\", \"Ananda (Daoukro)\"), \r\n (\"Annépé\", \"Annépé\"), \r\n (\"Anyama\", \"Anyama\"), \r\n (\"Arrah (Côte-d'Ivoire)\", \"Arrah (Côte-d'Ivoire)\"),\r\n (\"Assaoufoué\", \"Assaoufoué\"), \r\n (\"Attiégouakro\", \"Attiégouakro\"),\r\n (\"Attoutou A\", \"Attoutou A\"),\r\n (\"Azaguié\", \"Azaguié\"),\r\n (\"Bacanda\", \"Bacanda\"),\r\n (\"Badikaha\", \"Badikaha\"),\r\n (\"Bako (Côte d'Ivoire)\", \"Bako (Côte d'Ivoire)\"), \r\n (\"Baléko\", \"Baléko\"),\r\n (\"Bambalouma\", \"Bambalouma\"), \r\n (\"Bandakagni-Sokoura\", \"Bandakagni-Sokoura\"),\r\n (\"Bangolo\", \"Bangolo\"),\r\n (\"Bangoua (Côte d'Ivoire)\", \"Bangoua (Côte d'Ivoire)\"),\r\n (\"Banneu\", \"Banneu\"), \r\n (\"Batéguédia II\", \"Batéguédia II\"),\r\n (\"Bazra-Nattis\", \"Bazra-Nattis\"),\r\n (\"Béoumi\", \"Béoumi\"), \r\n (\"Biankouma\", \"Biankouma\"),\r\n (\"Bingerville\", \"Bingerville\"), \r\n (\"Bongouanou\", \"Bongouanou\"), \r\n (\"Bonoua (Côte d'Ivoire)\", \"Bonoua (Côte d'Ivoire)\"),\r\n 
(\"Bouaflé\", \"Bouaflé\"), \r\n (\"Bouandougou\", \"Bouandougou\"), \r\n (\"Bouna (Côte d'Ivoire)\", \"Bouna (Côte d'Ivoire)\"),\r\n (\"Boundiali (ville)\", \"Boundiali (ville)\"), \r\n (\"Céchi\", \"Céchi\"), \r\n (\"Dabéko\", \"Dabéko\"), \r\n (\"Dabou\", \"Dabou\"),\r\n (\"Dabouyo\", \"Dabouyo\"), \r\n (\"Dah-Zagna\", \"Dah-Zagna\"), \r\n (\"Dakpadou\", \"Dakpadou\"), \r\n (\"Daleu\", \"Daleu\"),\r\n (\"Daloa\", \"Daloa\"),\r\n (\"Danané\", \"Danané\"),\r\n (\"Danguira\", \"Danguira\"),\r\n (\"Daoukro\", \"Daoukro\"),\r\n (\"Diabo (Côte d'Ivoire)\", \"Diabo (Côte d'Ivoire)\"),\r\n)\r\n\r\n\"\"\"\r\n\"Diamarakro\", \r\n\"Diangobo (Yakassé-Attobrou)\", \r\n\"Diawala\", \r\n\"Diboké\", \r\n\"Didiévi\", \r\n\"Diéouzon\", \r\n\"Digbeugnoa\", \r\n\"Dignago\", \r\n\"Dikouehipalegnoa\", \r\n\"Dimbokro\", \r\n\"Diogo (Boundiali)\", \r\n\"Dioulatiédougou\", \r\n\"Divo\", \r\n\"Djouroutou\", \r\n\"Doba (Côte d'Ivoire)\", \r\n\"Dogbo\", \r\n\"Doké\", \r\n\"Domaboué\", \r\n\"Domangbeu\", \r\n\"Douasso\", \r\n\"Doubé\", \r\n\"Doudoukou\", \r\n\"Duékoué\", \r\n\"Ebounou\", \r\n\"Elima (Côte d'Ivoire)\", \r\n\"Ery-Macouguié\", \r\n\"Fadiadougou\", \r\n\"Fahandougou\", \r\n\"Fahani\", \r\n\"Fakaha\", \r\n\"Fala (Côte d'Ivoire)\", \r\n\"Famienkro\", \r\n\"Faradiani\", \r\n\"Farandougou\", \r\n\"Ferkessédougou\", \r\n\"Fodio\", \r\n\"Fonondara\", \r\n\"Fresco (Côte d'Ivoire)\", \r\n\"Gabiadji\", \r\n\"Gagnoa\", \r\n\"Ganaoni\", \r\n\"Gbambiasso\", \r\n\"Gbangbégouiné\", \r\n\"Gbangbégouiné-Yati\", \r\n\"Gbékékro\", \r\n\"Gbéléban\", \r\n\"Gbémou\", \r\n\"Gbogui\", \r\n\"Gbon (Côte d'Ivoire)\", \r\n\"Gbongaha\", \r\n\"Gnaliepa\", \r\n\"Gnangnon\", \r\n\"Gohouo-Zagna\", \r\n\"Gomon\", \r\n\"Gonaté\", \r\n\"Gouessesso\", \r\n\"Gouiné\", \r\n\"Goulia\", \r\n\"Grabo (Côte d'Ivoire)\", \r\n\"Grand-Bassam\", \r\n\"Grand-Béréby\", \r\n\"Grand-Lahou\", \r\n\"Grand-Morié\", \r\n\"Grand-Zattry\", \r\n\"Guessabo\", \r\n\"Guéyo\", \r\n\"Guiendé\", \r\n\"Guiglo\", \r\n\"Guinglo-Tahouaké\", 
\r\n\"Issia\", \r\n\"Jacqueville (Côte d'Ivoire)\", \r\n\"Kahin-Zarabaon\", \r\n\"Kanakono\", \r\n\"Kaniéné\", \r\n\"Kanitélégué\", \r\n\"Kanoroba\", \r\n\"Kantélégué\", \r\n\"Kanzra\", \r\n\"Kaouara\", \r\n\"Karakoro (Côte d’Ivoire)\", \r\n\"Karakpo\", \r\n\"Kasséré\", \r\n\"Katiéré\", \r\n\"Katiola\", \r\n\"Kimbirila-Sud\", \r\n\"Koboko\", \r\n\"Kodiokofi\", \r\n\"Kofiplé\", \r\n\"Kolia\", \r\n\"Kong (Côte d'Ivoire)\", \r\n\"Kongasso\", \r\n\"Koni (Côte d'Ivoire)\", \r\n\"Konolo\", \r\n\"Korhogo\", \r\n\"Koro (Côte d'Ivoire)\", \r\n\"Kossou\", \r\n\"Kouakro\", \r\n\"Kouan-Houle\", \r\n\"Kouassi-Blékro\", \r\n\"Koukourandoumi\", \r\n\"Kounoumon\", \r\n\"Kouto\", \r\n\"Kpata\", \r\n\"Lahou-Kpanda\", \r\n\"Lakota (Côte d'Ivoire)\", \r\n\"Languibonou\", \r\n\"Lataha\", \r\n\"Liliyo\", \r\n\"Lodala\", \r\n\"Logoualé\", \r\n\"Logouhi\", \r\n\"Lohouré\", \r\n\"Lokoligou\", \r\n\"Lolobo (Yamoussoukro)\", \r\n\"Lomokankro\", \r\n\"Loplé\", \r\n\"Lossingué\", \r\n\"Lotono\", \r\n\"Loupala\", \r\n\"Loupougo\", \r\n\"Loviguié\", \r\n\"M'bahiakro\",\r\n\"\"\"","sub_path":"immobilier/ville.py","file_name":"ville.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"99739001","text":"import numpy as np\nfrom numpy import genfromtxt\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport sys\n\nfig = plt.figure()\ncsv_data = genfromtxt('./data/converted/{0}'.format(sys.argv[1]), delimiter=',')\n\n# data = csv_data[1:, 2:17]\ndata = csv_data\n\n# data is a n x 15 array\nrows, cols = data.shape\n\nvis_rows = cols * 3\nvis_data = np.random.rand(vis_rows, cols)\n\nim = plt.imshow(vis_data, animated=True)\ni = 0\n\nani = None\npause = False\n\n# allows you to pause/play by clicking on figure\ndef onClick(event):\n global pause\n pause ^= True\n\n if pause:\n ani.event_source.stop()\n else:\n ani.event_source.start()\n\n# animation update function\ndef updatefig(*args):\n global vis_data, data, i, vis_rows, im, rows, ani, pause\n try:\n vis_data[:,:] = data[i:i + vis_rows, :]\n except:\n plt.close(fig)\n sys.exit()\n\n im.set_array(vis_data)\n i += 1\n return im,\n\nfig.canvas.mpl_connect('button_press_event', onClick)\nani = animation.FuncAnimation(fig, updatefig, interval=10, blit=True)\nplt.show()","sub_path":"visualize_threads.py","file_name":"visualize_threads.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"340751311","text":"from copy import copy\n\ndef printarray(x):\n print(\" \".join([str(v) for v in x]))\n\ndef bubblesort(x):\n flag = True\n while flag:\n flag = False\n for j in range(len(x)-1, 0, -1):\n if x[j][1] < x[j-1][1]:\n x[j], x[j-1] = x[j-1], x[j]\n flag = True\n\ndef selectsort(x):\n for i in range(0, len(x)):\n minj = i\n for j in range(i, len(x)):\n if x[j][1] < x[minj][1]:\n minj = j\n if minj != i:\n x[minj], x[i] = x[i], x[minj]\n\n# ALDS1_2_C: 安定ソート\ndef main():\n input() # skip scan n\n x = [v for v in input().split(\" \")]\n y = copy(x)\n bubblesort(x)\n selectsort(y)\n printarray(x)\n print(\"Stable\")\n printarray(y)\n if x == y:\n print(\"Stable\")\n else:\n print(\"Not stable\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"aoj/src/ALDS1_2_C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"235602113","text":"'''\nWrite a Program to that prints series of Even numbers in reverse\norder from the limiting number entered by user.\n'''\n\n\nnum=int(input(\"Enter the number:\"))\n\nfor x in range(num,-1,-1):\n\n if(x % 2 == 0):\n\n print(x,end=\" \")\n\n\nprint()\n\n","sub_path":"Day 17/17-DailyFlash_Solutions/01_Feb_Solutions_Four/Python/Program2.py","file_name":"Program2.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"32012871","text":"from django.shortcuts import render\nfrom django.views.generic import View\nfrom django.http import JsonResponse\nfrom models.models import EntradaCC, Camara, Compra, Tienda, SalidaCC\nfrom django.db import models,connection\nfrom django.db.models import Count,Sum\nimport datetime\nimport psycopg2 as psy, pandas as pd\n# Create your views here.\n\nclass Graph(View):\n\n def get(self,request, *args, **kwargs):\n conn=psy.connect(host = 'localhost', user= 'postgres', password ='postgres', dbname= 'Sambil')\n cur = conn.cursor()\n template_name = 'graph.html'\n number =[0,1,2,3]\n result = []\n sql='''SELECT count(*) FROM public.models_entradacc WHERE fkcamara_id=1;'''\n df = pd.read_sql_query(sql, conn)\n for index, row in df.iterrows():\n result.append(row['count'])\n print(result)\n return render(request,template_name, {} )\n\ndef get_data(request, *args, **kwargs):\n #variables :\n conn=psy.connect(host = 'localhost', user= 'postgres', password ='postgres', dbname= 'Sambil')\n cur = conn.cursor()\n cuenta = []\n camara = []\n SumVentas = []\n tienda = []\n horaEntrada1 = []\n horaEntrada2 =[]\n horaEntrada3 =[]\n horaSalida1 = []\n horaSalida2 = []\n horaSalida3 = []\n cantPersonas = []\n\n #querys para contar las personas por camara \n\n q1 = EntradaCC.objects.values('fkcamara__id').annotate(cuenta=Count('id'))\n for l in q1:\n cuenta.append(l['cuenta'])\n camara.append(l['fkcamara__id'])\n print(camara)\n\n # query para mostrar la cantidad de personas que entraron al centro comercial con y sin telefono\n\n q2 = EntradaCC.objects.values('id').filter(macadd__isnull=True).count()\n q3 = EntradaCC.objects.values('id').filter(macadd__isnull=False).count()\n\n # querys para determinar la edad de las personas que entran al centro comercial determinando si tiene o no macAddres\n \n edad1 = EntradaCC.objects.values('edad').filter(edad__range=(0,9), macadd__isnull=True).count()\n edad3 = 
EntradaCC.objects.values('edad').filter(edad__range=(10,20), macadd__isnull=True).count()\n edad5 = EntradaCC.objects.values('edad').filter(edad__range=(21,30), macadd__isnull=True).count()\n edad7 = EntradaCC.objects.values('edad').filter(edad__range=(31,40), macadd__isnull=True).count()\n edad9 = EntradaCC.objects.values('edad').filter(edad__range=(41,50), macadd__isnull=True).count()\n edad11 = EntradaCC.objects.values('edad').filter(edad__range=(51,60), macadd__isnull=True).count()\n edad13 = EntradaCC.objects.values('edad').filter(edad__range=(61,70), macadd__isnull=True).count()\n edad15 = EntradaCC.objects.values('edad').filter(edad__range=(71,80), macadd__isnull=True).count()\n edad17= EntradaCC.objects.values('edad').filter(edad__range=(81,90), macadd__isnull=True).count()\n \n edad2 = EntradaCC.objects.values('edad').filter(edad__range=(0,9), macadd__isnull=False).count()\n edad4 = EntradaCC.objects.values('edad').filter(edad__range=(10,20), macadd__isnull=False).count() \n edad6 = EntradaCC.objects.values('edad').filter(edad__range=(21,30), macadd__isnull=False).count()\n edad8 = EntradaCC.objects.values('edad').filter(edad__range=(31,40), macadd__isnull=False).count()\n edad10 = EntradaCC.objects.values('edad').filter(edad__range=(41,50), macadd__isnull=False).count()\n edad12 = EntradaCC.objects.values('edad').filter(edad__range=(51,60), macadd__isnull=False).count()\n edad14 = EntradaCC.objects.values('edad').filter(edad__range=(61,70), macadd__isnull=False).count()\n edad16= EntradaCC.objects.values('edad').filter(edad__range=(71,80), macadd__isnull=False).count()\n edad18 = EntradaCC.objects.values('edad').filter(edad__range=(81,90), macadd__isnull=False).count()\n \n # querys que muestran las tiendas que vende mucho mas [top 5]\n qventas = Compra.objects.values('fktienda_id__nombre').annotate(ventas=Sum('total'))[:5]\n for l in qventas:\n tienda.append(l['fktienda_id__nombre'])\n SumVentas.append(l['ventas'])\n \n # query para visualizar las 
entradas de las personas por entrada \n\n sql='''SELECT count(id) FROM public.models_entradacc WHERE fkcamara_id=1 AND date_part('month',registroe)=date_part('month',current_date)\n GROUP BY date_part('hour',registroe);'''\n df = pd.read_sql_query(sql, conn)\n for index, row in df.iterrows():\n horaEntrada1.append(int(row['count']))\n\n sql1='''SELECT count(id) FROM public.models_entradacc WHERE fkcamara_id=2 AND date_part('month',registroe)=date_part('month',current_date)\n GROUP BY date_part('hour',registroe);'''\n df = pd.read_sql_query(sql1, conn)\n for index, row in df.iterrows():\n horaEntrada2.append(int(row['count']))\n\n sql2='''SELECT count(id) FROM public.models_entradacc WHERE fkcamara_id=3 AND date_part('month',registroe)=date_part('month',current_date)\n GROUP BY date_part('hour',registroe);'''\n df = pd.read_sql_query(sql2, conn)\n for index, row in df.iterrows():\n horaEntrada3.append(int(row['count']))\n \n # querys para ver el flujo de salida de las personas por hora\n\n sql3='''SELECT count(id) FROM public.models_salidacc WHERE fkcamara_id=1 AND date_part('month',registros)=date_part('month',current_date)\n GROUP BY date_part('hour',registros);'''\n df = pd.read_sql_query(sql3, conn)\n for index, row in df.iterrows():\n horaSalida1.append(int(row['count']))\n\n sql4='''SELECT count(id) FROM public.models_salidacc WHERE fkcamara_id=2 AND date_part('month',registros)=date_part('month',current_date)\n GROUP BY date_part('hour',registros);'''\n df = pd.read_sql_query(sql4, conn)\n for index, row in df.iterrows():\n horaSalida2.append(int(row['count']))\n\n sql5='''SELECT count(id) FROM public.models_salidacc WHERE fkcamara_id=3 AND date_part('month',registros)=date_part('month',current_date)\n GROUP BY date_part('hour',registros);'''\n df = pd.read_sql_query(sql5, conn)\n for index, row in df.iterrows():\n horaSalida3.append(int(row['count']))\n\n\n\n data = {\n \"labels\": camara,\n \"default\":cuenta,\n \"default2\":[q2,q3],\n 
\"labels2\":['Personas sin telefono','Personas con telefono'],\n \"default3\":[edad1,edad3,edad5, edad7, edad9,edad11,edad13,edad15,edad17],\n \"default4\":[edad2,edad4,edad6,edad8,edad10,edad12,edad14,edad16,edad18],\n \"labels4\": tienda,\n \"default5\": SumVentas,\n \"labelsHora\":['8:00 am','9:00 am','10:00 am','11:00 am', '12:00 pm', '13:00 pm','14:00 pm','15:00 pm','16:00 pm','17:00 pm','18:00 pm', '19:00 pm','20:00 pm', '21:00 pm', '22:00 pm', '23:00 pm'],\n \"default6\":horaEntrada1,\n \"default7\":horaEntrada2,\n \"default8\":horaEntrada3,\n \"default9\":horaSalida1,\n \"default10\":horaSalida2,\n \"default11\":horaSalida3,\n\n }\n return JsonResponse(data)","sub_path":"models/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"23497491","text":"from keyword_relation.models import Keyword_Pages\n\nfrom django.shortcuts import render\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.conf.urls.static import static\nimport os\nfrom django.conf import settings\nimport numpy as np\nimport random\nfrom gensim.models import Word2Vec\nimport wikipedia\nimport networkx as nx\nimport random\nimport matplotlib.pyplot as plt\nfrom gensim.models import Word2Vec\nfrom nltk.corpus import wordnet\nimport pandas\n\nimport wikipediaapi\n\nwiki_wiki = wikipediaapi.Wikipedia('en')\n\ndef run():\n\n print(\"START\")\n\n\n # Get all keywords\n all_keywords = Keyword_Pages.objects.all()\n\n # count\n count = 0\n\n # iterate over all keywords\n for keyword in all_keywords:\n\n count += 1\n if count % 1000 == 0:\n print(count)\n\n # get model datamodels\n if keyword.google_graph_embedding == \"\":\n model_path = os.path.join(settings.STATIC_ROOT,'../models/related_keywords_graph_embedding.model')\n model = Word2Vec.load(model_path)\n\n related_main = find_similar_keywords(model, keyword.keyword)\n\n keyword.google_graph_embedding = related_main\n keyword.save()\n\n # get wiki path\n if keyword.wiki_path == \"\":\n print(\"Finding path for\", keyword.keyword)\n visited = set()\n wiki_paths = wiki_bfs(keyword.keyword, \"Glossary of computer science\", visited, 0, [], 100)\n wiki_path = get_probability_score(wiki_paths)\n\n\n if wiki_path == \"N/A\":\n wiki_path_str = wiki_path\n else:\n first = True\n wiki_path_str = \"\"\n for val in wiki_path:\n if first:\n wiki_path_str += val\n first = False\n else:\n wiki_path_str += \" --> \" + val\n \n print(wiki_path_str)\n\n keyword.wiki_path = wiki_path_str\n keyword.save()\n\n\ndef wiki_bfs(source, target, visited, num_found, found_paths, iter_limit):\n queue = []\n visited.add(source)\n queue.append([source])\n iter_count = 0\n output = []\n while len(queue) > 0 and iter_count <= iter_limit:\n iter_count += 1\n path_attempt = 
queue.pop(0)\n v = path_attempt[-1]\n if v == target.lower():\n if path_attempt not in output:\n output.append(path_attempt)\n# print(output)\n# for val in path_attempt:\n# try:\n# visited.remove(val)\n# except:\n# pass\n visited.remove(target.lower())\n iter_count = 0\n if len(output) == 3:\n # print(\"hit\")\n return output\n try:\n v = wiki_wiki.page(v)\n except:\n continue\n edges = [x.lower() for x in v.links]\n index_push = 0\n for edge in edges:\n if (edge in target.lower() or target.lower() in edge) and edge not in visited:\n visited.add(edge)\n new_path_attempt = path_attempt[:]\n new_path_attempt.append(edge)\n if edge == target.lower():\n queue.insert(0, new_path_attempt)\n index_push += 1\n queue.insert(index_push, new_path_attempt)\n# print(queue)\n \n for edge in edges:\n if edge not in visited:\n visited.add(edge)\n new_path_attempt = path_attempt[:]\n new_path_attempt.append(edge)\n queue.append(new_path_attempt)\n # print(\"out\", iter_count)\n # print(len(queue))\n return output\n\n\ndef get_probability_score(path):\n\n if path == []:\n return \"N/A\"\n\n all_probs = []\n for i in range(len(path)):\n probabilities_path = []\n for val in path[i]:\n probabilities = 1/(len(wiki_wiki.page(val).links))\n probabilities_path.append(probabilities)\n all_probs.append((sum(probabilities_path), path[i]))\n\n all_probs.sort(key = lambda x: x[0]) \n return all_probs[0][1]\n\n# function to get related keywords\ndef find_similar_keywords(model, x):\n output = \"\"\n first = True\n try:\n count = 0\n for node, _ in model.wv.most_similar(x):\n if first:\n output += node\n first = False \n else:\n output += \"|\" + node\n count += 1\n if count >=5:\n break\n except:\n # print(x, \"not in graph\")\n output=\"NA\"\n return output","sub_path":"scripts/pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"489511400","text":"import win32api\nimport win32gui\nimport win32con\nimport time\nimport random\n\nfrom control.base_control import BaseControl\n\nimport common.screen as screen\n\nRIGHT = 0\nDOWN = 1\nLEFT = 2\n\n\nclass ReplyMapCommon(BaseControl):\n\n _scranDirection = 0 # 0 → 1 ↓ 2←\n _nextScranDirection = 0\n _isScranMap = False\n\n team1BattleMaxCount = 5\n team2BattleMaxCount = 0\n\n def __init__(self, handle, interval):\n self.handle = handle\n self.interval = interval\n\n def getEnemyLocation(self):\n\n imgs = [\"enemy\\\\ship_p1_45_45_55_55.png\",\n \"enemy\\\\ship_p2_45_45_55_55.png\",\n \"enemy\\\\ship_p3_45_45_55_55.png\",\n \"enemy\\\\ship_p4_45_45_55_55.png\",\n \"enemy\\\\ship_z1_45_45_55_55.png\",\n \"enemy\\\\ship_z2_45_45_55_55.png\",\n \"enemy\\\\ship_z3_45_45_55_55.png\",\n \"enemy\\\\ship_h1_45_45_55_55.png\",\n \"enemy\\\\ship_h1_45_45_55_55.png\",\n \"enemy\\\\ship_h2_45_45_55_55.png\",\n \"enemy\\\\ship_q1_45_45_55_55.png\",\n \"enemy\\\\ship_q2_45_45_55_55.png\",\n ]\n\n # random.shuffle(imgs)\n for i in range(len(imgs)):\n xylist = screen.matchResImgInWindow(\n self.handle, imgs[i],0.7)\n if len(xylist) > 0:\n return xylist\n \n \n\n return []\n\n def getBossLocation(self):\n imgs = [\"enemy\\\\d1_4_boss_45_45_55_55.png\",\n \"enemy\\\\d1_2_boss_45_45_55_55.png\",\n \"enemy\\\\d1_3_boss_45_45_55_55.png\",\n \"enemy\\\\boss_48_45_52_55.png\",\n ]\n\n random.shuffle(imgs)\n for i in range(len(imgs)):\n xylist = screen.matchResImgInWindow(\n self.handle, imgs[i],0.7)\n if len(xylist) > 0:\n return xylist\n \n\n return []\n\n\n def dragPerLeft(self):\n self.dragPer(10, 50, 80, 50)\n\n def dragPerRight(self):\n self.dragPer(80, 50, 10, 50)\n\n def dragPerUp(self):\n self.dragPer(50, 20, 50, 70)\n\n def dragPerDown(self):\n self.dragPer(50, 70, 50, 20)\n\n \n def resetMapPosition(self):\n if not self._isScranMap:\n winHash = \"\"\n while not screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n winHash = 
screen.winScreenHash(self.handle )\n self.dragPerUp()\n \n winHash = \"\" \n while not screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n winHash = screen.winScreenHash(self.handle )\n self.dragPerLeft()\n \n \n self._needResetMap = False\n self._scranMapEnd = False\n self._scranDirection = 0\n\n def scranDragMap(self): # 全图扫描\n winHash = screen.winScreenHash(self.handle )\n self._isScranMap = True\n if self._scranDirection == RIGHT:\n self.dragPerRight()\n \n if screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n self._nextScranDirection = LEFT\n self._scranDirection = DOWN\n return\n if self._scranDirection == DOWN:\n self.dragPerDown()\n # 换方向左右\n \n if screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n self._isScranMap = False # 扫完全图\n return\n\n self._scranDirection = self._nextScranDirection\n if self._scranDirection == LEFT:\n self.dragPerLeft()\n \n if screen.alikeHash(winHash ,screen.winScreenHash(self.handle),0.8) :\n self._nextScranDirection = RIGHT # 左边到尽头 下去后往右\n self._scranDirection = DOWN\n return\n\n def findAndBattle(self):\n\n if self._teamNum == 1:\n if self._team1BattleCount < self.team1BattleMaxCount:\n xylist = self.getEnemyLocation()\n minX=self.getPosX(15)\n # maxY=self.getPosY(80)\n resList=[]\n for point in xylist:\n if point[0]>=minX:\n resList.append(point)\n if len(resList) > 0:\n x, y = resList[0]\n # self.leftClick(x, y)\n cx=self.getPosX(50)\n cy=self.getPosY(50)\n self.drag(x,y,cx,cy) #拖动不是一比一 大概是一半\n time.sleep(2)\n self.drag(x,y,cx,cy) \n self.leftClick(cx, cy)\n time.sleep(5)\n else:\n self.resetMapPosition()\n self.scranDragMap()\n\n else:\n time.sleep(10)\n self.switchTeam()\n self._teamNum = 2\n\n if self._teamNum == 2:\n if self._team2BattleCount < self.team2BattleMaxCount:\n xylist = self.getEnemyLocation()\n if len(xylist) > 0:\n x, y = xylist[0]\n # self.leftClick(x, y)\n cx=self.getPosX(50)\n cy=self.getPosY(50)\n self.drag(x,y,cx,cy) #拖动不是一比一 大概是一半\n time.sleep(2)\n 
self.drag(x,y,cx,cy) \n self.leftClick(cx, cy)\n time.sleep(5)\n else:\n self.resetMapPosition()\n self.scranDragMap()\n else:\n xylist = self.getBossLocation()\n if len(xylist) > 0:\n x, y = xylist[0]\n self.leftClick(x, y)\n time.sleep(5)\n else:\n self.resetMapPosition()\n self.scranDragMap()\n","sub_path":"core/control/reply_map_common.py","file_name":"reply_map_common.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"333482694","text":"\"\"\"\n========================================\n13. Writing modified files with MNE-BIDS\n========================================\n\nMNE-BIDS is designed such that it enforces good practices when working with\nBIDS data. One of the principles of creating BIDS datasets from raw data is\nthat the raw data should ideally be written unmodified, as-is. To enforce\nthis, :func:`mne_bids.write_raw_bids` performs some basic checks and will\nthrow an exception if it believes you're doing something that you really\nshouldn't be doing (i.e., trying to store modified \"raw\" data as a BIDS\nraw data set.)\n\nThere might be some – rare! – situations, however, when working around this\nintentional limitation in MNE-BIDS can be warranted. For example, you might\nencounter data that has manually been split across multiple files during\nrecording, even though it belongs to a single experimental run. In this case,\nyou might want to concatenate the data before storing them in BIDS. This\ntutorial will give you an example on how to use :func:`mne_bids.write_raw_bids`\nto store such data, despite it being modified before writing.\n\n.. warning:: Please be aware that the situations in which you would need\n to apply the following solution are **extremely** rare. If you\n ever find yourself wanting to apply this solution, please take a\n step back, take a deep breath and re-consider whether this is\n **absolutely** necessary. If even a slight doubt remains,\n reach out to the MNE-BIDS developers.\n\n\"\"\"\n\n# Authors: Richard Höchenberger \n# License: BSD-3-Clause\n\n# %%\n# Load the ``sample`` dataset, and create a concatenated raw data object.\n\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport mne\nfrom mne.datasets import sample\n\nfrom mne_bids import write_raw_bids, BIDSPath\n\n\ndata_path = Path(sample.data_path())\nraw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif'\noutput_path = data_path / '..' 
/ 'MNE-sample-data-bids'\nbids_path = BIDSPath(subject='01', task='audiovisual', root=output_path)\n\nraw = mne.io.read_raw_fif(raw_fname)\nraw.info['line_freq'] = 60\nraw_concat = mne.concatenate_raws([raw.copy(), raw])\n\n# %%\n# Trying to write these data will fail.\n\ntry:\n write_raw_bids(raw=raw_concat, bids_path=bids_path, overwrite=True)\nexcept ValueError as e:\n print(f'Data cannot be written. Exception message was: {e}')\n\n# %%\n# We can work around this limitation by first writing the modified data to\n# a temporary file, reading it back in, and then writing it via MNE-BIDS.\n\nwith NamedTemporaryFile(suffix='_raw.fif') as f:\n fname = f.name\n raw_concat.save(fname, overwrite=True)\n raw_concat = mne.io.read_raw_fif(fname, preload=False)\n write_raw_bids(raw=raw_concat, bids_path=bids_path, overwrite=True)\n\n# %%\n# That's it!\n#\n# .. warning:: **Remember, this should only ever be a last resort!**\n#\n","sub_path":"examples/write_modified_files.py","file_name":"write_modified_files.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"327939080","text":"class Account(object):\n def __init__(self, owner, balance):\n self.owner = owner\n self.balance = balance\n\n def deposit(self, amount):\n if amount < 0:\n raise AssertionError\n self.balance += amount\n print('Deposit accepted')\n\n def withdraw(self, amount):\n if self.balance < amount:\n print('Funds Unavailable!')\n raise AssertionError\n self.balance -= amount\n print('Withdrawal accepted')\n\n def __str__(self):\n return f'Account owner: {self.owner}\\nAccount balance: {self.balance}'\n\n\nacct1 = Account('Jose', 100)\nprint(acct1)\nprint(acct1.balance)\n\nacct1.deposit(50)\nacct1.withdraw(75)\nacct1.withdraw(500)\n\n","sub_path":"05-Object Oriented Programming/Account.py","file_name":"Account.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"408561844","text":"#import math\n\n#x = ''' 1427 0 \n\n #876652098643267843 \n#5276538'''\n#xs = x.split()\n#xs.reverse()\n#for i in xs:\n\t#i = math.sqrt(int(i))\n\t#if str(i) == '0.0':\n\t\t#print('0.0000')\n\t#else:\n\t\t#print(str(round(i,4)))\n\n\nimport math\nimport sys\n\nx = sys.stdin.readlines()\nxs = x.split()\nxs.reverse()\nfor i in xs:\n\ti = math.sqrt(int(i))\n\tprint('%.4f' % i)\n\n","sub_path":"timus1001.py","file_name":"timus1001.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"646758528","text":"motorcycles = ['honda', 'yamaha', 'sazuki', 'honda', 'honda']\n\n#motorcycles = [motorcycle for motorcycle in motorcycles if motorcycle != 'honda']\n\nmotorcycles = [i for i in motorcycles if i != 'honda']\n\nfor i in range(10):\n print(f\"Hello, {i}\")\n\nwhile None:\n #motorcycles.remove('honda')\n print('test')\n\na = [\"Hello\", \"World\"]\nb = [\"Hello\", \"World\"]\n\nif a is b:\n print(\"a is b\")\nif a == b:\n print(\"a == b\")","sub_path":"basics/chapter_3/programs/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"418174116","text":"from django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\n\nfrom rest_framework.exceptions import ValidationError\n\n\nclass EmailService:\n @classmethod\n def send_email(cls, data):\n # is_debug = getattr(settings, \"DEBUG\", False)\n # if is_debug:\n # print(\n # \"--- Email not sent because DEBUG is TRUE. Email data below. ---\" # noqa\n # )\n # print(data)\n # return None\n\n to_email = data.get(\"to_email\")\n subject = data.get(\"subject\")\n template_data = data.get(\"meta\") or {}\n template_data[\"subject\"] = subject\n if not data.get(\"templates\"):\n raise ValidationError(\"templates is required\")\n html = render_to_string(data.get(\"templates\"), template_data)\n\n message = EmailMultiAlternatives(\n subject,\n html,\n settings.DEFAULT_FROM_EMAIL,\n [to_email],\n )\n try:\n message.attach_alternative(html, \"text/html\")\n message.send()\n except Exception as e:\n print(f\"Email exception: {e}\")\n\n return None\n\n @classmethod\n def send_verification_email(cls, email, first_name, token):\n data = {\n \"to_name\": first_name,\n \"to_email\": email,\n \"subject\": \"Test\",\n \"templates\": \"email/reset_password.html\",\n \"text\": \"Your code {}\",\n \"meta\": {\n \"first_name\": first_name or \"No First Name\",\n \"token\": token.key,\n },\n }\n from pprint import pprint\n pprint(data)\n return cls.send_email(data)\n","sub_path":"base/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"206813233","text":"# A Representation for an \"Action4\" in a game of Evolution\nfrom gainPopulation import *\nfrom gainBodySize import *\nfrom buySpeciesBoard import *\nfrom replaceTrait import *\n\nclass Action4:\n\n\t\"\"\"\n\t\tConstruct a new Action4\n\t\t@param cardIdx: the index of the traitCard donated by the Player in their hand\n\t\t@param GP: a list of zero or more GainPopulation \n\t\t@param GB: a list of zero or more GainBodySize \n\t\t@param BT: a list of zero or more BuySpeciesBoard \n\t\t@param RT: a list of zero or more ReplaceTrait \n\t\tNat, [GainPopulation, ...], [GainBodySize, ...], [BuySpeciesBoard, ...], [ReplaceTrait, ...] -> Void\n\t\"\"\"\n\tdef __init__(self, cardIdx, GP, GB, BT, RT):\n\t\tself.tribute = cardIdx\n\t\tself.GP = GP\n\t\tself.GB = GB\n\t\tself.BT = BT\n\t\tself.RT = RT\n\n\t\"\"\"\n\t\tGets all card indexes referenced in this action. Used for cheating checks\n\t\t@return a list of every card index within this action\n\t\tVoid -> ListOf(Nat)\n\t\"\"\"\n\tdef getAllCardIdcs(self):\n\t\tidcs = [self.tribute]\n\t\tidcs += [p.cardIdx for p in self.GP]\n\t\tidcs += [b.cardIdx for b in self.GB]\n\t\tidcs += [bt.payment for bt in self.BT]\n\t\tfor bt in self.BT:\n\t\t\tidcs += bt.traitList\n\t\tidcs += [rt.newTraitIdx for rt in self.RT]\n\t\treturn idcs\n\n\t\"\"\"\n\t\tGets all species indexes referenced in this action. 
Used for cheating checks\n\t\t@return a list of every species index within this action\n\t\tVoid -> ListOf(Nat)\n\t\"\"\"\n\tdef getAllSpecIdcs(self):\n\t\tidcs = [p.specIdx for p in self.GP]\n\t\tidcs += [b.specIdx for b in self.GB]\n\t\tidcs += [rt.specIdx for rt in self.RT]\n\t\treturn idcs\n\n\t\"\"\"\n\t\tCreates a json representation of this action\n\t\t@return a JsonArray representing an action\n\t\tVoid -> JsonArray\n\t\"\"\"\n\tdef actionToJson(self):\n\t\treturn [self.tribute, \n\t\t\t\t[p.toJson() for p in self.GP],\n\t\t\t\t[b.toJson() for b in self.GB],\n\t\t\t\t[bt.toJson() for bt in self.BT],\n\t\t\t\t[rt.toJson() for rt in self.RT]]\n\n\t\"\"\"\n\tConstruct an Action4 from the given JSON input\n\tEFFECT: if the input is invalid, quit\n\t@param action4: JSON representation of an Action4\n\t@param player: PlayerState that this action corresponds \n\t@return an Action4 equivalent to the JSON\n\tJSON -> Action4\n\t\"\"\"\n\t@staticmethod\n\tdef actionFromJson(action4):\n\t\tAction4.validate(action4)\n\t\tcardIdx, GP, GB, BT, RT = action4\n\n\t\treturn Action4(cardIdx, [GainPopulation.fromJson(p) for p in GP], \n\t\t\t\t\t\t\t\t[GainBodySize.fromJson(b) for b in GB], \n\t\t\t\t\t\t\t\t[BuySpeciesBoard.fromJson(buyt) for buyt in BT], \n\t\t\t\t\t\t\t\t[ReplaceTrait.fromJson(rept) for rept in RT])\n\n\t\"\"\"\n\t\tValidate a JSON Action4\n\t\tEFFECT: if not valid, quit\n\t\t@param action4: JSON representation of an Action4\n\t\t@param player: PlayerState that this action corresponds \n\t\tJSON -> Void\n\t\"\"\"\n\t@staticmethod\n\tdef validate(action4):\n\t\tcardIdx, GP, GB, BT, RT = action4\n\t\tif not (len(action4) == 5 and type(cardIdx) == int):\n\t\t\tquit()\n\t\telse:\n\t\t\t[GainPopulation.validate(p) for p in GP]\n\t\t\t[GainBodySize.validate(b) for b in GB]\n\t\t\t[BuySpeciesBoard.validate(buyt) for buyt in BT]\n\t\t\t[ReplaceTrait.validate(rept) for rept in 
RT]\n","sub_path":"12/dealer/action4.py","file_name":"action4.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"310432194","text":"import numpy as np\nimport pandas as pd\nimport random as rn\nimport time\nimport jgraph as ig\nimport random as rn\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style(style=\"whitegrid\")\nsns.set_color_codes()\n\n\n#custome libaries\nfrom data_preprocessing_IoT import IoT_data_common\n#from Autoencoder_IoT_model import build_iot_AE\nfrom encoder import EncoderForest\n\nimport utils\n\nparams = {'dataset': 'IoT-23'}\n\n###calling IoT-23 dataset####\nprint(\"Loading dataset IoT-23.....\\n\")\ntrain_data, train_labels, test_data, test_labels = IoT_data_common(params)\nprint(\"train shape: \", train_data.shape)\nprint(\"test shape: \", test_data.shape)\nprint(\"train_label shape: \", train_labels.shape)\nprint(\"test_label shape: \", test_labels.shape)\n\ntest_label_original = np.argmax(test_labels, axis=1)\n\n# scikit learn solution\nss_500 = time.time()\nencoder = EncoderForest(500)\nencoder.fit(train_data, max_depth=10)\nprint(\"end fit\")\nencoded = encoder.encode(train_data)\nprint(\"end encode\")\nee_500 = time.time()\nprint('{:.3f} sec, Scikit Learn Normal'.format(ee_500-ss_500))\n\n# scikit learn solution\nss_1000 = time.time()\nencoder_1k = EncoderForest(1000)\nencoder_1k.fit(train_data, max_depth=10)\nprint(\"end fit\")\nencoded_1k = encoder_1k.encode(train_data)\nprint(\"end encode\")\nee_1000 = time.time()\nprint('{:.3f} sec, Scikit Learn Normal'.format(ee_1000-ss_1000))\n\n# scikit learn solution\nss_2000 = time.time()\nencoder_2k = EncoderForest(2000)\nencoder_2k.fit(train_data, max_depth=10)\nprint(\"end fit\")\nencoded_2k = encoder_2k.encode(train_data)\nprint(\"end encode\")\nee_2000 = time.time()\nprint('{:.3f} sec, Scikit Learn Normal'.format(ee_2000-ss_2000))\n\nimg_prime_1k = encoder_1k.decode(encoded_1k[100000])#.reshape(10, 10)\nprint(\"end decode\",img_prime_1k)\n\nimg_prime_2k = encoder_2k.decode(encoded_2k[10000])#.reshape(10, 10)\nprint(\"end decode\",img_prime_2k)\n\nimg_prime = 
encoder.decode(encoded[100078])#.reshape(10, 10)\nprint(\"end decode\",img_prime)\n\nss1=np.argsort(img_prime)\nprint(\"\\n decoded result \\n:\", ss1)\n\nss2=np.argsort(img_prime_1k)\nprint(\"\\n decoded result \\n:\", ss2)\n\nss3=np.argsort(img_prime_2k)\nprint(\"\\n decoded result \\n:\", ss3)\n\nf = plt.figure(figsize=(20,10))\nplt.subplot(1,3,1)\nsns.distplot(img_prime, kde=True, color=\"r\")\nplt.title('EncoderForest with 500 trees and depth 20')\nplt.xlabel('Reconstruction Error')\n\nplt.subplot(1,3,2)\nsns.distplot(img_prime_1k, kde=True, color=\"k\")\nplt.title('EncoderForest with 1k trees and depth 20')\nplt.xlabel('Reconstruction Error')\n\nplt.subplot(1,3,3)\nsns.distplot(img_prime_2k, kde=True, color=\"b\")\nplt.title('EncoderForest with 2k trees and depth 20')\nplt.xlabel('Reconstruction Error')\nplt.show()\n\n#sns.countplot(encoder.decode(encoded[100000]))\n\n'''\ndf = pd.DataFrame(data=train_data)\nf = plt.figure(figsize=(20,10))\nplt.subplot(1,1,1)\nsns.pairplot(df, diag_kind=\"kde\")\nplt.title('Distribution plot')\nplt.show()\n'''\n\nSorted=True\nfig = plt.figure(figsize=(20,10))\nax1 = plt.subplot(122, projection='polar')\nrn, thetan = utils.getVals(encoded,np.array([0.,0.]),sorted=Sorted)\nfor j in range(len(rn)):\n ax1.plot([thetan[j],thetan[j]], [1,rn[j]], color='b',alpha=1,lw=1)\n\nra, thetaa = utils.getVals(encoded,np.array([3.3,3.3]),sorted=Sorted)\nfor j in range(len(ra)):\n ax1.plot([thetaa[j],thetaa[j]], [1,ra[j]], color='r',alpha=0.9,lw=1.3)\n \nax1.set_title(\"Normal Isolation Forest\\nNormal: Mean={0:.3f}, Var={1:.3f}\\nAnomaly: Mean={2:.3f}, 
Var={3:.3f}\".format(np.mean(rn),np.var(rn),np.mean(ra),np.var(ra)))\n\nax1.set_xticklabels([])\nax1.set_xlabel(\"Anomaly\")\nax1.set_ylim(0,encoded.limit)\n\nax1.axes.get_xaxis().set_visible(False)\nax1.axes.get_yaxis().set_visible(False)\nplt.show()\n","sub_path":"Encoder_Forest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"111496463","text":"\"\"\"Defines functions and storage for trading stocks\"\"\"\n\nfrom collections import namedtuple\nimport datetime\nfrom decimal import Decimal\nfrom stocks import Stock, tradable_stocks\n\n\n# Using a factory function to create the storage objects for individual trades and\n# collections of trades lets us replace the current basic and in-memory\n# implementations with a Trade object and genuine persistence but keep the API intact\n# at some point in the future.\ndef persistence_factory(mode='throwaway'):\n if mode == 'throwaway':\n Trade = namedtuple('Trade', ['stock', 'time', 'quantity', 'direction', 'traded_price'])\n trades = []\n return (Trade, trades)\n\n\nTrade, trade_collection = persistence_factory()\n\n\n# Please see the assumptions and interpretations in the README\n# for some background on the running_product and number_of_trades\naccumulating_values = {\n 'number_of_trades': 0,\n 'running_product': 1\n}\n\n\ndef record_trade(stock_symbol, time=None, quantity=0, direction='', traded_price=0):\n \"\"\"\n Expected arguments as follows:\n stock_symbol - string representing an instance of any class derived from Stock\n time - datetime defaults to now in reality past times would presumably be disallowed\n quantity - int the number of shares traded in the trade\n direction - string only 'buy' and 'sell' acceptable values\n traded_price - int unit price in pennies for the trade\n\n \"\"\"\n if isinstance(tradable_stocks[stock_symbol], Stock):\n if direction not in ('buy', 'sell'):\n raise ValueError(\"Direction argument must be either 'buy' or 'sell'\")\n if time is None:\n time = datetime.datetime.now()\n trade_collection.append(Trade(tradable_stocks[stock_symbol], time, quantity, direction, traded_price))\n accumulating_values['running_product'] = traded_price * accumulating_values['running_product']\n accumulating_values['number_of_trades'] += 1\n else:\n raise KeyError(\"No stock available with that symbol\")\n\n\ndef 
volume_weighted_stock_price(stock_symbol):\n \"\"\"\n Takes a string representing a tradable stock and returns the\n volume weighted stock price based on the last 15 minutes' activity.\n\n \"\"\"\n if not isinstance(tradable_stocks[stock_symbol], Stock):\n raise KeyError(\"No stock available with that symbol\")\n relevant_trades = [trade\n for trade in trade_collection\n if trade.stock.stock_symbol == stock_symbol\n and trade.time >= (datetime.datetime.now() - datetime.timedelta(minutes=15))]\n numerator = sum([trade.traded_price * trade.quantity\n for trade in relevant_trades])\n denominator = sum([trade.quantity\n for trade in relevant_trades])\n div_zero_message = \"No stocks of this type have been traded in the last 15 minutes.\"\n return Decimal(numerator / denominator) if denominator else div_zero_message\n\n\ndef all_share_index(results=trade_collection):\n \"\"\"\n Takes a container of trades, with our results list as the default\n and returns the nth root of the product of each trade unit price\n from 0 to n. Passing in the container lets us use a test container.\n\n \"\"\"\n div_zero_message = \"No trades have taken place yet.\"\n running_product = accumulating_values['running_product']\n number_of_trades = accumulating_values['number_of_trades']\n return Decimal(\n running_product ** (1/number_of_trades)\n ) if number_of_trades else div_zero_message\n","sub_path":"exchange.py","file_name":"exchange.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"46602324","text":"from .auth import APIAuth\nimport requests\nimport json\nfrom .utils import Utils as AUtils\n\n\nclass APIClient:\n \"\"\"Various methods for Client BO stuff\"\"\"\n def __init__(self, api_auth: APIAuth):\n self.api_auth = api_auth\n self._client_location_view = None\n\n def get_client_location_view(self):\n \"\"\"Get a list of all the locations, sensorlocations, buildingmaps and macs for this account\n :return:\n \"\"\"\n headers = {\"Authorization\": \"Bearer \" + self.api_auth.get_token()}\n url = self.api_auth.api_config.get_api_url() + \"client/locationview\"\n response = requests.get(url, headers=headers)\n\n if response.status_code == 200:\n self._client_location_view = json.loads(response.content.decode())\n return self._client_location_view\n else:\n print(\"Bad response code: \" + str(response.status_code))\n return None\n\n def get_active_locations(self):\n devices_and_locations = self._client_location_view['locationSensorViews']\n return [location for location in devices_and_locations if location['lastSensorReportTime'] != -1]\n\n def get_active_devices(self, duration: int = (24 * 60 * 60 * 1000)):\n active_locs = self.get_active_locations()\n now = AUtils.now_ms()\n\n active_devices = []\n for loc in active_locs:\n for device in loc['sensorList']:\n if(now - device['lastReportTime']) < duration:\n active_devices.append(device)\n\n return active_devices\n\n def get_locations(self):\n devices_and_locations = self._client_location_view['locationSensorViews']\n return [location['location'] for location in devices_and_locations]\n\n def get_location_by_id(self, location_id: str):\n devices_and_locations = self._client_location_view['locationSensorViews']\n for location in devices_and_locations:\n if location['location']['id'] == location_id:\n return location\n return None\n\n def get_device_by_id(self, device_id: str):\n devices_and_locations = self._client_location_view['locationSensorViews']\n for location in 
devices_and_locations:\n for device in location['sensorList']:\n if device['id'] == device_id:\n return device\n\n return None\n\n","sub_path":"python/aretasapiclient/aretas_client.py","file_name":"aretas_client.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"113912568","text":"# f = open('secrets.txt', mode='w')\n# f.write('This is a new line')\n# f.close()\n# print(secret_data)\n\n# reading the data from the file and saving it to data argument\n# with open('secrets.txt', 'r') as f:\n# data = f.read()\n\n\n# adds another line to the data argument\n# with open('secrets.txt', 'w') as f:\n# f.write(data + '\\nThis is a new line 22222')\n#\n# # reads the content of the file\n# with open('secrets.txt', 'r') as f:\n# data2 = f.read()\n#\n# print(data2)\n\nimport json\n\n# with open('json_test.txt', 'r') as f:\n# # load is like read from json file as a python data type\n# json_dict = json.load(f)\n# # json.loads(f.read()) would be a string\n#\n# print(json_dict)\n# # reads it as a dictionary\n# print(type(json_dict))\n\n\n# my_family = {\n# \"parents\": ['Beth', 'Jerry'],\n# \"children\": ['Summer', 'Morty']\n# }\n#\n#\n# def write_to_json(data):\n# with open('new_json.json', 'w') as f:\n# json.dump(data, f)\n#\n#\n# def read_from_json():\n# with open('new_json.json', 'r') as f:\n# data = json.load(f)\n# return data\n#\n#\n# write_to_json(my_family)\n# new_fam = read_from_json()\n# print(new_fam)\n#\n# new_fam['JSONTest'] = 'Hello world'\n# write_to_json(new_fam)\n# print(read_from_json())\n\n\n# def add_item_to_menu(name, price):\n# menu.append({'name': name,\n# 'price': price})\n#\n#\n# def read_from_menu():\n# with open('menu.json', 'r') as f:\n# data = json.load(f)\n# return data\n#\n#\n# try:\n# menu = read_from_menu()\n# except FileNotFoundError and json.decoder.JSONDecodeError:\n# menu = []\n#\n#\n# def write_to_menu(data):\n# with open('menu.json', 'w') as f:\n# json.dump(data, f)\n#\n#\n# print(menu)\n# while True:\n# name = input('Enter a name of a dish: ')\n# if name == 'quit':\n# break\n# price = input('Enter a price: ')\n# add_item_to_menu(name, price)\n#\n# print(menu)\n# write_to_menu(menu)\nimport random\n\nnew_fam = {\n \"firstName\": \"Jane\",\n \"lastName\": \"Doe\",\n \"hobbies\": [\"running\", 
\"sky diving\", \"singing\"],\n \"age\": 35,\n \"children\": [\n {\n \"firstName\": \"Alice\",\n \"age\": 6\n },\n {\n \"firstName\": \"Bob\",\n \"age\": 8\n }\n ]\n}\n\nwith open('json2.json', 'w') as f:\n json.dump(new_fam, f)\n\nwith open('json2.json', 'r') as f:\n family = json.load(f)\n\nfor child in family['children']:\n print(f'{family[\"firstName\"]}\\'s child {child[\"firstName\"]} is {child[\"age\"]}')\n child['fav_color'] = random.choice(['blue', 'yellow', 'green'])\n\nwith open('json2.json', 'w') as f:\n json.dump(family, f, indent=2)\n","sub_path":"Week5/Day4/CourseNotes/CN.py","file_name":"CN.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"277023689","text":"import Accountant\nimport Strategy\nimport DataFeed\nimport matplotlib.pyplot as plt\n\ndef runTask():\n data = DataFeed.TextDataFeed('spx_prices.csv')\n params = {'signal_window' : 40, 'max_position' : 100, \n 'wait_period' : 100}\n trader = Strategy.SNPStrategy(params)\n booky = Accountant.Accountant(data, trader)\n booky.runBacktest()\n return booky\n\ndef showResults(resu):\n plt.plot(resu.cumPnl)\n plt.show()\n","sub_path":"Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"305942057","text":"class Apple:\n price = 5\n created_apples = 0\n\n def __init__(self, color):\n Apple.created_apples += 1\n\n if Apple.created_apples % 12 == 0:\n self.color = \"brurple\"\n elif Apple.created_apples == 3:\n self.color = \"purple\"\n elif Apple.created_apples == 4:\n self.color = \"brown\"\n else:\n self.color = color\n self.price = Apple.price\n\n @classmethod\n def change_price(cls, new_price):\n cls.price = new_price\n\n def __repr__(self):\n return \"This is a {:.0f} cedi {} Apple\".format(self.price, self.color)\n\nif __name__ == \"__main__\":\n # first = Apple(\"red\")\n # print(first)\n # second = Apple(\"blue\")\n # print(second)\n # Apple.change_price(6.0)\n # third = Apple(\"green\")\n # print(third)\n # print(first)\n for _ in range(30):\n print(Apple(\"red\"))\n","sub_path":"apple.py","file_name":"apple.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"571036407","text":"##Text extractor.\n\n\n##THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n##SOFTWARE.\n\nimport argparse\nimport struct\nimport string\n\njumps = 12 ## 8/12\noffset = 48 ## 44/48\ntenc = 'cp1251' ##cp932 or 1251\n\nparser = argparse.ArgumentParser(description='Text extractor for Spyro 3.')\nparser.add_argument('filepath', type=str, help = 'Path to level file.')\nparser.add_argument('--output', type=str, default = 's3_text.txt', help = 'Output file name.')\n\n\nargs = parser.parse_args()\n\ndef isUppercase(tSym):\n\tretVal = False\n\tcompStr = string.ascii_uppercase\n\tfor symbol in compStr:\n\t\tif tSym == symbol:\n\t\t\t\n\t\t\tretVal = True\n\treturn retVal\n\ndef getSubfileInfo(filepath, subfile):\n\tinfo_list = list()\n\tifile = open(filepath, 'rb')\n\tifile.seek((subfile-1)*8)\n\tinfo_list.append(struct.unpack(' varaddr and lstart < sf_size:\n\t\t\twad.seek(filestart+lstart)\n\t\t\tidbyte = wad.read(1)[0]\n\t\t\tetest = wad.read(1)\n\t\t\twad.seek(filestart+lstart+1)\n\t\t\tif not idbyte == 255 and len(etest) == 1:\n\t\t\t\tlTrig = True\n\t\t\t\tlsize = 0\n\t\t\t\twhile lTrig:\n\t\t\t\t\tif etest[0] == 0:\n\t\t\t\t\t\tlTrig = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tlsize += 1\n\t\t\t\t\tetest = wad.read(1)\n\t\t\t\t\t\n\t\t\t\tif lsize > 2:\n\t\t\t\t\tptr_list.append(varaddr+12)\n\t\t\t\t\tptr_list.append(0)\n\n\t\twad.seek(filestart+varaddr)\n\t\tllist = list()\n\t\tplist = list()\n\t\terrcount = 0\n\n\t\tvarlen = varlen_list[x]\n\t\t\n\t\tfor v in range(int(varlen/4)-4):\n\t\t\tlstart = struct.unpack(' 
varaddr:\n\t\t\t\tbreak\n\t\t\telif not lstart < sf_size:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tplist.append(varaddr+16+v*4)\n\t\t\t\tllist.append(lstart)\t\n\n\t\tfor tl in range(len(llist)):\n\t\t\twad.seek(filestart+llist[tl])\n\t\t\tidbyte = wad.read(1)[0]\n\t\t\tif not idbyte == 255:\n\t\t\t\ttxt = filestart+llist[tl]+idbyte\n\t\t\t\twad.seek(txt)\n\n\t\t\t\tlTrig = True\n\t\t\t\tlsize = 0\n\t\t\t\twhile lTrig:\n\t\t\t\t\tif wad.read(1)[0] == 0:\n\t\t\t\t\t\tlTrig = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tlsize += 1\n\n\t\t\t\twad.seek(txt)\n\t\t\t\ttbuf = wad.read(lsize)\n\t\t\t\tif (lsize > 2):\n\t\t\t\t\tptr_list.append(plist[tl])\n\t\t\t\t\tptr_list.append(1)\n\twad.close()\n\treturn ptr_list\n\ndef getTexts(filepath, subfile):\n\twtmp = getSubfileInfo(filepath, subfile)\t\n\tfilestart = wtmp[0]\n\tsf_size = wtmp[1]\n\n\ttxt_list = list()\n\tpointers = getPointersAddr(filepath, subfile)\t\n\n\twad = open(filepath, 'rb')\n\tfor t in range(int(len(pointers)/2)):\n\t\twad.seek(filestart+pointers[t*2])\n\t\ttxtbuf = wad.read(4)\n\t\tlstart = struct.unpack(' total_in: #total in not less than total out -- moved to miner checking\n # return False\n return True\n\n#################### testing ######################\n\n #prevents it from running if loaded the module... if just invoke it as a python script will run the tests\nif __name__ == \"__main__\":\n pr1, pu1 = signature.generate_keys()\n pr2, pu2 = signature.generate_keys()\n pr3, pu3 = signature.generate_keys()\n pr4, pu4 = signature.generate_keys()\n\n Tx1 = Tx()\n Tx1.add_input(pu1,1)\n Tx1.add_output(pu2,1) #sending coin to pu2\n Tx1.sign(pr1) # signing with private key so valid\n\n if Tx1.is_valid():\n print(\"Success! Tx is valid!\")\n else:\n print(\"Error! 
Tx is invalid!\")\n\n Tx2 = Tx()\n Tx2.add_input(pu1,2)\n Tx2.add_output(pu2,1)\n Tx2.add_output(pu3,1)\n Tx2.sign(pr1) # using the private key of #1 since is the input\n\n # Tx3 is a test for escrow transaction\n Tx3 = Tx()\n Tx3.add_input(pu3,1.2) # shouldnt have more output than input.\n Tx3.add_output(pu1,1.1) #mining and mining rewards, transaction fee usually goes to miner.\n Tx3.add_required(pu4)\n Tx3.sign(pr3)\n Tx3.sign(pr4)\n\n\n for t in [Tx1,Tx2,Tx3]:\n if t.is_valid():\n print(f\"Success! Tx is valid!\")\n else:\n print(f\"Error! Tx is invalid!\")\n\n # wrong signatures, should be pu1 signing\n Tx4 = Tx()\n Tx4.add_input(pu1,1) #user 1 is sending 1 count to Tx4\n Tx4.add_output(pu2,1)\n Tx4.sign(pr2) #but here signed with the wrong private_key\n\n # Escrow Tx transaction not signed by the arbiter\n Tx5 = Tx()\n Tx5.add_input(pu3,1.2)\n Tx5.add_output(pu1,1.1)\n Tx5.add_required(pu4)\n Tx5.sign(pr3)\n # Tx5.sign(pr4)\n\n # Two input addres, signed by one only\n Tx6 = Tx()\n Tx6.add_input(pu3,1)\n Tx6.add_input(pu4,0.1)\n Tx6.add_output(pu1,1.1)\n Tx6.sign(pr3) #Tx8.sign(pr4) # is missing so should be invalid\n\n # Outputs exceed the Inputs\n Tx7 = Tx()\n Tx7.add_input(pu4, 1.2)\n Tx7.add_output(pu1,1)\n Tx7.add_output(pu2,2)\n Tx7.sign(pr4)\n\n #negative value tests\n Tx8 = Tx()\n Tx8.add_input(pu2, -1)\n Tx8.add_output(pu1,-1)\n Tx8.sign(pr2)\n\n # Modified after Transaction signed\n Tx9 = Tx()\n Tx9.add_input(pu1,1)\n Tx9.add_output(pu2,1) #sending coin to pu2\n Tx9.sign(pr1) # signing with private key so valid\n Tx9.outputs[0]=(pu3,1) #instead of pu2 which was [(pu2,1)]\n\n\n for t in [Tx4, Tx5, Tx6, Tx7, Tx8,Tx9]:\n if t.is_valid():\n print(f\"Error! Tx is valid!\")\n else:\n print(f\"Success! 
Tx is invalid!\")\n\n####################### resources ########################\n","sub_path":"transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"468180536","text":"\"\"\"exp_to_source: Reformat Level2b MSA data to be source-based.\n\"\"\"\nfrom collections import defaultdict\n\nfrom jwst.datamodels import MultiExposureModel, ModelContainer, ImageModel\nfrom jwst.datamodels.properties import merge_tree\n\n__all__ = ['exp_to_source', 'multislit_to_container']\n\n\ndef exp_to_source(inputs):\n \"\"\"Reformat exposure-based MSA data to source-based.\n\n Parameters\n ----------\n inputs: [MultiSlitModel, ...]\n List of MultiSlitModel instances to reformat.\n\n Returns\n -------\n {str: MultiExposureModel, }\n Returns a dict of MultiExposureModel instances wherein each\n instance contains slits belonging to the same source.\n The key is the name of each source.\n \"\"\"\n result = defaultdict(MultiExposureModel)\n for exposure in inputs:\n for slit in exposure.slits:\n result[slit.name].exposures.append(slit)\n result[slit.name].exposures[-1].meta = exposure.meta\n return result\n\n\ndef multislit_to_container(inputs):\n \"\"\"Reformat exposure-based MSA data to source-based containers.\n\n Parameters\n ----------\n inputs: [MultiSlitModel, ...]\n List of MultiSlitModel instances to reformat, or just a \n ModelContainer full of MultiSlitModels.\n\n Returns\n -------\n {str: ModelContainer, }\n Returns a dict of ModelContainer instances wherein each\n instance contains ImageModels of slits belonging to the same source.\n The key is the name of each slit.\n \"\"\"\n result = defaultdict(ModelContainer)\n for exposure in inputs:\n for slit in exposure.slits:\n result[slit.name].append(ImageModel(slit.instance))\n merge_tree(result[slit.name][-1].meta.instance, \n exposure.meta.instance)\n return result\n","sub_path":"jwst/exp_to_source/exp_to_source.py","file_name":"exp_to_source.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"94816610","text":"import subprocess\nimport sys\n\n\nif __name__ == \"__main__\":\n\n def return_sub_call():\n\n print(\"Calling 'ls -l'...\")\n p = subprocess.call((\"ls\", \"-l\"), stdout=subprocess.PIPE)\n print(\"DONE! The command {}\\r\\n\".format(\"succeeded\" if not p else \"failed\"))\n\nreturn_sub_call()\n","sub_path":"rax_university/pythonII/derp.py","file_name":"derp.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"23980690","text":"import re\nfrom operator import itemgetter\nfrom pathlib import Path\n\n\"\"\"\n\nTHIS FILE IS NOT USED BY finetune OR inference SCRIPT \n\nTHESE UTILS WERE USED FOR CONVERTING DATASETS IN \nOTHER FORMATS OR TO EXTRACT SENTENCES WITH MANY ENTITIES INSIDE\n\n\"\"\"\n\n\ndef main_method():\n # copy this in real -main- file\n \"\"\"\n result = extract_rich_data(data_args.data_dir + 'prova.txt', tag_style=\"bio\")\n sents = list()\n for s in result[:40]:\n sents.append([s[0]])\n\n with open(os.path.join(training_args.output_dir, 'selected_sents_conll.csv'), 'w', newline='',\n encoding='utf8') as out:\n writer = csv.writer(out)\n writer.writerows(sents)\n \"\"\"\n\n\ndef extract_rich_data(path, tag_style=\"bio\"):\n \"\"\"\n FOR TEST ONLY\n \"\"\"\n text, labels = read_dataset_conll(path)\n flat_text = [' '.join(seq) for seq in text]\n flat_labels = [' '.join(lab_seq) for lab_seq in labels]\n seq_scores = [0] * len(flat_text)\n\n # compute scores\n if tag_style == \"bio\":\n for i, seq in enumerate(flat_text):\n labels_found = set()\n for j, word in enumerate(text[i]):\n lab = labels[i][j]\n if 'B-' in lab:\n if lab not in labels_found:\n seq_scores[i] += 2\n labels_found.add(lab)\n else:\n seq_scores[i] -= 1\n\n #if len(seq.split()) < 30:\n # seq_scores[i] = 0\n else: #\"IO\"\n for i, seq in enumerate(flat_text):\n labels_found = set()\n last_label_found = \"\"\n for j, word in enumerate(text[i]):\n lab = labels[i][j]\n if 'I-' in lab:\n if lab not in labels_found:\n seq_scores[i] += 2\n labels_found.add(lab)\n elif lab != last_label_found:\n seq_scores[i] += 1\n last_label_found = lab\n\n result = sorted([list(x) for x in zip(flat_text, flat_labels, seq_scores)], key=itemgetter(2))[::-1]\n\n return result\n\n\ndef read_dataset_conll(path):\n \"\"\"\n TEST ONLY: legge il dataset e restituisce frasi e labels\n - text: lista di liste di stringhe (parole)\n - labels: lista di liste di label\n \"\"\"\n file_path = Path(path)\n raw_text = 
file_path.read_text().strip()\n raw_sequences = re.split(r'\\n\\t?\\n', raw_text)\n text = []\n labels = []\n for raw_seq in raw_sequences:\n tmp_sent = []\n tmp_tags = []\n for raw_line in raw_seq.split('\\n'):\n splits = raw_line.rsplit(sep=' ', maxsplit=4)\n tmp_tag = splits[-1]\n tmp_word = splits[0]\n tmp_sent.append(tmp_word)\n tmp_tags.append(tmp_tag)\n text.append(tmp_sent)\n labels.append(tmp_tags)\n\n return text, labels\n\n\ndef read_dataset_gmb(path):\n \"\"\"\n TEST ONLY: legge il dataset e restituisce frasi e labels\n - text: lista di liste di stringhe (parole)\n - labels: lista di liste di label\n \"\"\"\n file_path = Path(path)\n raw_text = file_path.read_text(encoding=\"utf8\").strip()\n\n raw_lines = re.split(r'\\n', raw_text)\n lines = list()\n for line in raw_lines:\n if line[0] == ',':\n lines.append(line[1:])\n else:\n lines.append(line)\n\n text = []\n labels = []\n\n tmp_sent_words = []\n tmp_sent_labels = []\n for line in lines:\n if line.startswith('Sentence: '):\n if tmp_sent_labels and tmp_sent_words:\n text.append(tmp_sent_words)\n labels.append(tmp_sent_labels)\n tmp_sent_words = []\n tmp_sent_labels = []\n _, word, pos, ne_label = line.split(',')\n tmp_sent_words.append(word)\n tmp_sent_labels.append(ne_label)\n\n else:\n try:\n splits = line.rsplit(sep=',', maxsplit=4)\n ne_label = splits[-1]\n word = splits[-3]\n tmp_sent_words.append(word)\n tmp_sent_labels.append(ne_label)\n except:\n pass\n\n return text, labels\n\n\ndef read_dataset_wikigold(path):\n \"\"\"\n TEST ONLY: legge il dataset e restituisce frasi e labels\n - text: lista di liste di stringhe (parole)\n - labels: lista di liste di label\n \"\"\"\n file_path = Path(path)\n raw_text = file_path.read_text(encoding=\"utf8\").strip()\n raw_sequences = re.split(r'\\n\\n', raw_text)\n text = []\n labels = []\n for raw_seq in raw_sequences:\n tmp_sent = []\n tmp_tags = []\n for raw_line in raw_seq.split('\\n'):\n tmp_word, tmp_tag = raw_line.split(' ')\n 
tmp_sent.append(tmp_word)\n tmp_tags.append(tmp_tag)\n text.append(tmp_sent)\n labels.append(tmp_tags)\n\n return text, labels\n\n\ndef read_dataset_wnut(path):\n \"\"\"\n TEST ONLY: legge il dataset e restituisce frasi e labels\n - text: lista di liste di stringhe (parole)\n - labels: lista di liste di label\n \"\"\"\n file_path = Path(path)\n raw_text = file_path.read_text(encoding=\"utf8\").strip()\n raw_sequences = re.split(r'\\n\\t?\\n', raw_text)\n text = []\n labels = []\n\n for raw_seq in raw_sequences:\n tmp_sent = []\n tmp_tags = []\n for raw_line in raw_seq.split('\\n'):\n try:\n tmp_word, tmp_tag = raw_line.split('\\t')\n tmp_sent.append(tmp_word)\n tmp_tags.append(tmp_tag)\n except:\n pass\n text.append(tmp_sent)\n labels.append(tmp_tags)\n\n return text, labels\n\n\ndef read_dataset_secfiling(path):\n \"\"\"\n TEST ONLY: legge il dataset e restituisce frasi e labels\n - text: lista di liste di stringhe (parole)\n - labels: lista di liste di label\n \"\"\"\n file_path = Path(path)\n raw_text = file_path.read_text(encoding=\"utf8\").strip()\n raw_sequences = re.split(r'\\n\\t?\\n', raw_text)\n text = []\n labels = []\n\n for raw_seq in raw_sequences:\n tmp_sent = []\n tmp_tags = []\n for raw_line in raw_seq.split('\\n'):\n try:\n splits = raw_line.rsplit(sep=' ')\n tmp_word = splits[-4]\n tmp_tag = splits[-1]\n tmp_sent.append(tmp_word)\n tmp_tags.append(tmp_tag)\n except:\n print(raw_line)\n pass\n text.append(tmp_sent)\n labels.append(tmp_tags)\n\n return text, labels","sub_path":"utils/datasets_utils.py","file_name":"datasets_utils.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"237458509","text":"# 9095 / count number for make N with 1, 2, 3\ndp = {1: 1, 2: 2, 3: 4} # set DP dict\n\n\ndef find(n):\n global dp\n\n if n not in dp:\n temp = 0\n # minus i from N for find number for make N - i\n for i in [1, 2, 3]:\n if n - i >= 0:\n temp += find(n - i)\n dp[n] = temp\n\n return dp[n]\n\n\nfor T in range(int(input())):\n N = int(input())\n\n print(find(N))\n","sub_path":"dp/9095.py","file_name":"9095.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"11622972","text":"import sys\nsys.path.append(\"..\")\nimport os\nimport javabridge\nimport bioformats\nimport SimpleITK as sitk\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nrrd\nfrom tools import image_io as bfio\nfrom tools import image_processing as impro\n\n# Start the Java VM\njavabridge.start_vm(class_path=bioformats.JARS)\n\n#path_to_data = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', 'Daten', '24h', 'untreated'))\npath_to_data = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', '..', 'Daten2'))\ninterpolator = 'bspline'\n\nfor directory in os.listdir(path_to_data):\n data_dir = os.path.join(path_to_data, directory)\n if os.path.exists(data_dir):\n for filename in os.listdir(data_dir):\n if filename.endswith('.tif'):\n path_to_tif = os.path.join(data_dir, filename)\n print('Processing image: ', path_to_tif)\n \n # Get a numpy array from the tif stack with the dimension\n meta_data, raw_data = bfio.get_tif_stack(filepath=path_to_tif, series=0, depth='z', return_dim_order='XYZC') # XYZC\n \n # Transpose the numpy array from XYZC to CZYX for the use with SimpleITK\n raw_data = np.transpose(raw_data, axes=[3,2,1,0]) # CZYX\n \n # Extract the channel -> make for each channel\n raw_data = raw_data[0,:,:,:]\n \n # Make a SimpleITK out of the numpy array and set its metadata\n image = sitk.GetImageFromArray(raw_data, isVector=False) # XYZ\n image.SetOrigin([0.0, 0.0, 0.0])\n image.SetDirection(np.identity(3, dtype=np.double).flatten().tolist())\n image.SetSpacing((meta_data.get('physical_size_x'), \n meta_data.get('physical_size_y'), \n meta_data.get('physical_size_z')))\n #print(image.GetOrigin())\n #print(image.GetDirection())\n #print(image.GetSpacing())\n \n # Make isotropic voxels. 
Distinction needed, so that \n # 48h->untreated_3 and 72h->untreated_1 have the same z-size as\n # the corresponding OpenSegSPIM-data\n \n resampled_image = impro.make_image_isotropic(image, interpolator, 0)\n \n #print(resampled_image.GetOrigin())\n #print(resampled_image.GetDirection())\n #print(resampled_image.GetSpacing())\n \n # Get a numpy array from the resampled simpleITK image\n np_image = sitk.GetArrayFromImage(resampled_image)\n \n # Transpose the numpy array from ZYX back to to XYZ\n np_image = np.transpose(np_image, axes=[2,1,0]) # XYZ\n np_image = np_image.astype('uint8')\n \n \n \n new_spacing = resampled_image.GetSpacing()\n header = {\"spacings\": [new_spacing[0], new_spacing[1], new_spacing[2]], \n \"dimension\": np_image.ndim,\n \"type\": \"uchar\", \n \"sizes\": [resampled_image.GetWidth(), resampled_image.GetHeight(), \n resampled_image.GetDepth()],\n \"units\": ['\"microns\"', '\"microns\"', '\"microns\"']}\n name = os.path.splitext(filename)[0]\n new_filename = os.path.join(data_dir, name)\n new_filename = new_filename+'_8_bit'+'.nrrd'\n nrrd.write(new_filename, data=np_image, header=header, index_order='F')","sub_path":"01-data_preparation/01-ds2-generate_isotropic_voxels.py","file_name":"01-ds2-generate_isotropic_voxels.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"269763780","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE','ProTwo.settings')\n# configuring the settings for the project\n\nimport django\ndjango.setup()\n\n## Fake POP script\nimport random\nfrom AppTwo.models import User\nfrom faker import Faker\nfakegeneration = Faker()\ndef populate(N=5):\n for entry in range(N):\n # get the topic for the entry\n # create the fake data for that entry\n fake_firstName = fakegeneration.first_name()\n fake_lastName = fakegeneration.last_name()\n fake_email = fakegeneration.email()\n\n # create the new User entry\n user = User.objects.get_or_create(first_name=fake_firstName,last_name=fake_lastName,email=fake_email)[0]\n\nif __name__ == '__main__':\n print(\"populating script\")\n populate(20)\n print(\"populating complete\")\n","sub_path":"ProTwo/populate_AppTwo.py","file_name":"populate_AppTwo.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"408342869","text":"import json\nimport traceback\n\nimport requests\n\nfrom datetime import datetime, timedelta\nfrom threading import Thread\n\nfrom sqlalchemy import text\n\nfrom webapp import app, db, steem\nfrom models import Post\nfrom utilities import log, seconds_from_youtube_duration, get_valid_video, markdown_to_safe_html\n\nfrom utilities import DBConnection\n\n\nclass PostUpdateThread(Thread):\n def __init__(self, db, app):\n Thread.__init__(self)\n self.app = app\n self.db = db\n\n self.posts_to_delete = []\n\n # update scores of posts created within last week, set older post scores to 0\n def updatePostScores(self):\n try:\n # Shouldn't this use SQLAlchemy?\n # Todo: Test performance against raw SQLAlchemy updates\n with DBConnection() as db:\n q = '''\n update posts set\n trending_score =\n (pow(pending_payout_value, 0.4) * 1000000) / pow(EXTRACT(EPOCH FROM current_timestamp - created) + 300, 0.2),\n hot_score =\n (sqrt(pending_payout_value - least(9.99, pending_payout_value)) * 1000000) / (EXTRACT(EPOCH FROM current_timestamp - created) + 60)\n where EXTRACT(EPOCH FROM current_timestamp - created) > 600\n and EXTRACT(EPOCH FROM current_timestamp - created) < 604800\n '''\n db.engine.execute(text(q).execution_options(autocommit=True))\n q = '''\n update posts set\n trending_score = 0, hot_score = 0\n where EXTRACT(EPOCH FROM current_timestamp - created) >= 604800\n and trending_score > 0\n '''\n db.engine.execute(text(q).execution_options(autocommit=True))\n\n except Exception as ex:\n log('Failed to update scores...')\n log(f\"{ex}\\n{traceback.format_exc()}\")\n\n # query Steem API node for up to date content, and add to post\n def update_steem_info(self, post):\n with DBConnection() as db:\n try:\n # trap http type errors and retry fetch\n content = {}\n while not content:\n try:\n content = steem.get_content(post.author, post.permlink)\n except Exception as e:\n log('Problem getting Steem info from API for: @' + post.author + '/' + 
post.permlink + '!')\n log(f\"{ex}\\n{traceback.format_exc()}\")\n\n post.created = datetime.strptime(content['created'], '%Y-%m-%dT%H:%M:%S')\n post.category = content['category']\n\n js = content.get('json_metadata', '[]')\n metadata = json.loads(js)\n tags = metadata.get('tags', [])\n\n post.tags = ' '.join(tags)\n post.is_nsfw = True if post.tags.lower().find('nsfw') >= 0 else False\n post.title = content['title']\n post.has_declined_payout = False if float(content['max_accepted_payout'].split(' ')[0]) > 0 else True\n post.pending_payout_value = float(content['pending_payout_value'].split(' ')[0])\n post.total_payout_value = float(content['total_payout_value'].split(' ')[0])\n post.has_paidout = True if post.total_payout_value > 0 else False\n post.steem_json = content # todo - decide what of this should be stored\n post.steem_thumbnail_image_url = ''\n\n new_type, new_video_id, new_category = get_valid_video(content)\n\n # if valid on update, use new values, otherwise assume old values remain\n # this check is applied so dtube posts, edited in steemit are still retained\n if new_type and new_video_id and new_category:\n post.video_type, post.video_id, post.category = new_type, new_video_id, new_category\n post.description = markdown_to_safe_html(content['body'])\n\n return post\n except Exception as ex:\n log(f'Problem updating Steem info for: @{post.author }/{post.permlink }!')\n log(f\"{ex}\\n {traceback.format_exc()}\")\n return \"delete\"\n\n # query youtube/dtube/vimeo for up to date content, and add to post\n def update_video_info(self, post):\n\n try:\n if post.video_type == 'youtube':\n video_id = post.video_id\n video_api_key = app.config['YOUTUBE_API_KEY']\n # url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics%2Cstatus%2Cplayer&id=' + video_id + '&key=' + video_api_key\n url = f'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails&id={video_id}&key={video_api_key}'\n try:\n js = 
requests.get(url).json()\n except Exception as ex:\n log(url)\n log('Problem accessing YouTube Video info for: @' + post.author + '/' + post.permlink + '!')\n return \"delete\"\n\n if \"items\" in js and len(js[\"items\"]) == 1:\n item = js[\"items\"][0]\n post.video_thumbnail_image_url = item['snippet']['thumbnails']['medium']['url']\n post.video_duration_seconds = seconds_from_youtube_duration(item['contentDetails']['duration'])\n post.video_provider_channel_id = item['snippet']['channelId']\n video_published = datetime.strptime(item['snippet']['publishedAt'][:-5], '%Y-%m-%dT%H:%M:%S')\n if post.created > video_published:\n post.video_post_publish_delay_seconds = (post.created - video_published).total_seconds()\n else:\n post.video_post_publish_delay_seconds = 0\n # todo - decide which metadata to store in DB\n # post.video_info = {'snippet': item['snippet'], 'contentDetails': item['contentDetails']}\n\n elif post.video_type == 'dtube':\n try:\n url = 'https://steemit.com/dtube/@' + post.author + '/' + post.permlink + '.json'\n try:\n js = requests.get(url).json()['post']\n except Exception as ex:\n log(url)\n log('Problem accessing DTube Video info for: @' + post.author + '/' + post.permlink + '!')\n return\n metadata = js.get('json_metadata', '[]')\n post.video_thumbnail_image_url = 'https://ipfs.io/ipfs/' + metadata['video']['info']['snaphash']\n post.video_duration_seconds = metadata['video']['info']['duration']\n post.video_provider_channel_id = ''\n post.video_post_publish_delay_seconds = 0\n # todo - decide which metadata to store in DB\n # post.video_info = metadata\n except Exception as ex:\n # todo - fix regex so invalid dtubes don't reach here, then remove\n log('Problem updating updating dtube video info: ' + f\"{ex}\\n{traceback.format_exc()}\")\n log('Assumed Invalid, and Deleted post! 
: @' + post.author + '/' + post.permlink)\n return \"delete\"\n\n elif post.video_type == 'dlive':\n try:\n url = 'https://steemit.com/dlive/@' + post.author + '/' + post.permlink + '.json'\n try:\n js = requests.get(url).json()['post']\n except Exception as e:\n log(url)\n log('Problem accessing DLive Video info for: @' + post.author + '/' + post.permlink + '!')\n return \"delete\"\n metadata = js.get('json_metadata', '[]')\n post.video_thumbnail_image_url = metadata.get('thumbnail', '')\n post.video_duration_seconds = -1\n post.video_provider_channel_id = ''\n post.video_post_publish_delay_seconds = 0\n # todo - decide which metadata to store in DB\n # post.video_info = metadata\n except Exception as ex:\n # todo - fix intake filter regex so invalid dlives don't reach here, then remove\n log('Problem updating updating dlive video info: ' + f\"{ex}\\n{traceback.format_exc()}\")\n log('Assumed Invalid, and Deleted post! : @' + post.author + '/' + post.permlink)\n return \"delete\"\n\n # todo - implement support\n elif post.video_type == 'vimeo':\n pass\n\n except Exception as ex:\n log('Updating video info failed for: @' + post.author + '@' + post.permlink + '!')\n log(f\"{ex}\\n{traceback.format_exc()}\")\n return \"delete\"\n\n return post\n\n # query thread to update posts with pending update, and perform them\n # also update trending/hot scores every 5 minutes\n def run(self):\n last_updated_post_scores = datetime.now() - timedelta(seconds=240)\n while True:\n # update post scores every 5 minutes\n if (datetime.now() - last_updated_post_scores).seconds > 300:\n log('Updating post scores...')\n self.updatePostScores()\n last_updated_post_scores = datetime.now()\n log('Updated post scores!')\n\n with DBConnection() as db:\n post = db.session.query(\n Post\n ).filter(\n Post.pending_video_info_update\n ).order_by(\n Post.video_info_update_requested\n ).first()\n\n if post:\n new_post = self.update_steem_info(post)\n\n if new_post == \"delete\":\n 
db.session.delete(post)\n db.session.commit()\n continue\n\n post = new_post\n post.pending_steem_info_update = False\n\n new_post = self.update_video_info(post)\n\n if new_post == \"delete\":\n db.session.delete(post)\n db.session.commit()\n continue\n\n post = new_post\n\n post.pending_video_info_update = False\n db.session.commit()\n\n\nlog('Started Post Info Updater')\n\n# start thread for updating post info\nthread_1 = PostUpdateThread(db, app)\nthread_1.start()\n","sub_path":"web/app/post-info-updater.py","file_name":"post-info-updater.py","file_ext":"py","file_size_in_byte":10889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"409690473","text":"# @copyright@\n# Copyright (c) 2006 - 2017 Teradata\n# All rights reserved. Stacki(r) v5.x stacki.com\n# https://github.com/Teradata/stacki/blob/master/LICENSE.txt\n# @copyright@\n#\n# @rocks@\n# Copyright (c) 2000 - 2010 The Regents of the University of California\n# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org\n# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt\n# @rocks@\n\nimport stack.commands\nimport threading\nimport subprocess\nimport time\nimport os\n\nmax_threading = 512\ntimeout\t= 30\n\n\nclass command(stack.commands.HostArgumentProcessor,\n\tstack.commands.sync.command):\n\tpass\n\n\nclass Parallel(threading.Thread):\n\tdef __init__(self, cmd, out=None):\n\t\tself.cmd = cmd\n\t\tif not out:\n\t\t\tself.out = {\"output\": \"\", \"error\": \"\", \"rc\": 0}\n\t\telse:\n\t\t\tself.out = out\n\t\twhile threading.activeCount() > max_threading:\n\t\t\ttime.sleep(0.001)\n\t\tthreading.Thread.__init__(self)\n\n\tdef run(self):\n\t\tp = subprocess.Popen(self.cmd,\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.STDOUT,\n\t\t\tshell=True)\n\t\t(o, e) = p.communicate()\n\t\trc = p.wait()\n\t\tself.out['output'] = o\n\t\tself.out['error'] = e\n\t\tself.out['rc'] = rc\n\n\nclass Command(command):\n\t\"\"\"\n\tWrites the /etc/hosts file based on the configuration database\n\t\"\"\"\n\n\tdef run(self, params, args):\n\n\t\tself.notify('Sync Host\\n')\n\n\t\toutput = self.command('report.host')\n\t\tf = open('/etc/hosts', 'w')\n\t\tf.write(\"%s\\n\" % output)\n\t\tf.close()\n\n\t\tif os.path.exists('/srv/salt/rocks'):\n\t\t\tf = open('/srv/salt/rocks/hosts', 'w')\n\t\t\tf.write(\"%s\\n\" % output)\n\t\t\tf.close()\n\n\n\n\n\n","sub_path":"common/src/stack/command/stack/commands/sync/host/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"606948938","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 3 17:51:25 2018\n\n@author: github.com/sahandv\n\"\"\"\nfrom __future__ import print_function, division\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\n\n# =============================================================================\n# Split / Partition data with easy to understand proportions for your network \n# to 3 part.\n# =============================================================================\ndef snd_data_split_3(X,Y,train_proportion,test_proportion,validation_proportion):\n size_1 = 1 - train_proportion\n size_2 = validation_proportion/(test_proportion+validation_proportion)\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=size_1)\n x_test, x_valid, y_test, y_valid = train_test_split(x_test, y_test, test_size=size_2)\n return x_train,x_test,x_valid,y_train,y_test,y_valid\n\n# =============================================================================\n# Get actual and predicted Y values and return percentage of accuracy for each \n# class.\n# =============================================================================\ndef class_accuracy_percentage_calc(Y_actual,Y_prediction):\n df_confusion = pd.crosstab(Y_actual, Y_prediction, rownames=['Actual'], colnames=['Predicted'], margins=True)\n count_of_classess = df_confusion.All.size\n class_accuracy_percentage = []\n for i in range(count_of_classess-1):\n class_accuracy_percentage.append(float(df_confusion[i][i])/float(df_confusion.All[i]))\n return class_accuracy_percentage\n","sub_path":"snd_base_additions.py","file_name":"snd_base_additions.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"379003017","text":"import base64\nimport json\nimport os\nimport re\nimport time\nfrom typing import List, Dict, Optional\n\nimport configargparse\nimport requests\nfrom loguru import logger\n\nURL = \"\"\nSESSION_KEY = \"\"\nUSERNAME = \"\"\nPASSWORD = \"\"\n#\n\n\ndef levenshtein_distance(s1: str, s2: str) -> int:\n if s1 == s2:\n return 0\n # https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python\n if len(s1) < len(s2):\n return levenshtein_distance(s2, s1)\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n return previous_row[-1]\n\n\ndef call_method(method: str, params: list, id_: int = 1) -> dict:\n assert len(URL) > 0\n response = requests.post(URL, json={'method': method, 'params': params, 'id': id_})\n # TODO: Add limit\n while not response.text.startswith(('[', '{')):\n logger.warning(\"Response is not valid json, trying again. 
Actual response: {}\", response.text)\n time.sleep(1)\n response = requests.post(URL, json={'method': method, 'params': params, 'id': id_})\n return response.json()\n\n\ndef call_method_with_session_key(method: str, *params, id_: int = 1, max_retries: int = 3):\n global SESSION_KEY\n for retry_counter in range(1, max_retries + 1):\n try:\n response = call_method(method, [SESSION_KEY] + list(params), id_)['result']\n except Exception:\n logger.exception(\"Error getting method result ({} / {})\", retry_counter, max_retries, diagnose=True)\n continue\n if 'status' in response and response['status'].lower().startswith('invalid') and response['status'].lower().endswith('key'):\n logger.error(\"Invalid session key, getting new session key ({} / {})\", retry_counter, max_retries)\n SESSION_KEY = get_session_key(True)\n continue\n return response\n logger.error(\"Failed getting valid response within specified number of retries\")\n return None\n\n\ndef get_session_key(force: bool = False) -> str:\n if force:\n key = call_method('get_session_key', [USERNAME, PASSWORD]).get('result')\n # TODO: Handle wrong username password combination\n with open('SESSION_KEY', 'w') as file:\n file.write(key)\n return key\n try:\n with open('SESSION_KEY') as file:\n key = file.read()\n except FileNotFoundError:\n key = get_session_key(True)\n return key\n\n\ndef list_surveys(username: str = None) -> list:\n if username:\n return call_method_with_session_key('list_surveys', username)\n else:\n return call_method_with_session_key('list_surveys')\n\n\ndef get_survey_properties(survey_id: int):\n return call_method_with_session_key('get_survey_properties', survey_id)\n\n\n@logger.catch(reraise=True)\ndef export_responses(survey_id: int, document_type: str):\n result = call_method_with_session_key('export_responses', survey_id, document_type, None, 'complete')\n if isinstance(result, dict) and 'status' in result.keys():\n logger.error(\"response: {}, returning empty data for now\", result)\n 
return {'responses': []}\n return json.loads(base64.b64decode(result).decode())\n\n\ndef list_users():\n return call_method_with_session_key('list_users')\n\n\ndef release_session_key():\n return call_method_with_session_key('release_session_key')\n\n\ndef list_groups(survey_id: int) -> List[Dict[str, str]]:\n return call_method_with_session_key('list_groups', survey_id)\n\n\ndef list_questions(survey_id: int, group_id: Optional[int] = None) -> List[Dict[str, str]]:\n return call_method_with_session_key('list_questions', survey_id, group_id)\n\n\ndef get_question_properties(question_id: int, question_settings: Optional[List[str]] = None) -> Dict[str, str]:\n return call_method_with_session_key('get_question_properties', question_id, question_settings)\n\n\ndef remove_html(s: str) -> str:\n result = re.sub(r'<[^>]*>', '', s)\n for old, new in [('\\r\\n', ' '), ('&', '&'), ('<', '<'), ('>', '>')]:\n result = result.replace(old, new)\n return result\n\n\ndef get_questions_with_answers(survey_id):\n result = []\n groups = sorted(list_groups(survey_id), key=lambda g: int(g.get('group_order')))\n for group in groups:\n group_id = int(group['gid'])\n if group_id == 52:\n continue\n current_group = {'group_name': group['group_name'], 'questions': []}\n questions = sorted(list_questions(survey_id, group_id), key=lambda q: int(q.get('question_order')))\n question_properties = {qid: get_question_properties(qid) for qid in (int(q['qid']) for q in questions)}\n main_qids = sorted(filter(lambda qid: not any(str(qid) in q.get('subquestions', []) for q in question_properties.values() if q.get('subquestions') != 'No available answers')\n and question_properties[qid].get('type') != 'X', question_properties.keys()), key=lambda qid: int(question_properties[qid]['question_order']))\n for qid in main_qids:\n question = question_properties[qid]\n current_question = {'code': question['title'], 'text': remove_html(question['question'])}\n answeroptions = question.get('answeroptions')\n if 
isinstance(answeroptions, dict):\n answer_codes = sorted(answeroptions.keys(), key=lambda a: int(answeroptions[a]['order']))\n current_question['answeroptions'] = []\n for a in answer_codes:\n current_question['answeroptions'].append({'code': a, 'text': remove_html(answeroptions[a]['answer'])})\n subquestions = question.get('subquestions')\n if isinstance(subquestions, dict):\n sub_qids = sorted(subquestions.keys(), key=lambda sq: int(question_properties[int(sq)].get('question_order')))\n current_question['subquestions'] = []\n for sqid in sub_qids:\n current_question['subquestions'].append({'code': subquestions[sqid]['title'], 'text': remove_html(subquestions[sqid]['question'])})\n current_group['questions'].append(current_question)\n result.append(current_group)\n return result\n\n\n@logger.catch(reraise=True)\ndef convert_limesurvey(data: dict, survey_id: int):\n try:\n with open('question_and_answers.json') as file:\n question_and_answers = json.load(file)\n except:\n with open('question_and_answers.json', 'w') as file:\n question_and_answers = get_questions_with_answers(survey_id)\n json.dump(question_and_answers, file)\n questions = []\n for qa in question_and_answers:\n questions.extend(qa['questions'])\n answeroptions = {}\n for question in questions:\n if 'subquestions' in question.keys():\n for sq in question['subquestions']:\n answeroptions[f'{question[\"code\"]}[{sq[\"code\"]}]'] = question.get('answeroptions', [{'text': 'not quoted', 'code': ''}, {'text': 'quoted', 'code': 'Y'}])\n else:\n answeroptions[question['code']] = question.get('answeroptions', [{'text': 'not quoted', 'code': ''}, {'text': 'quoted', 'code': 'Y'}])\n result = []\n with open('javascript_structure.json') as input_file:\n structure = json.load(input_file)\n for answer in data['responses']:\n current_answer = next(a for a in answer.values())\n # skip sample answer\n if current_answer['id'] == '1':\n continue\n current_result = {'categories': [], 'questions': []}\n for code in 
structure['categories']:\n answercode = current_answer[code]\n index, opt_code = min(enumerate(answeroptions[code]), key=lambda t: levenshtein_distance(t[1]['code'], answercode))\n if levenshtein_distance(opt_code['code'], answercode) < 2:\n current_result['categories'].append(index)\n else:\n current_result['categories'].append(-1)\n for question in structure['questions']:\n current_question = []\n for code in question:\n answercode = current_answer[code]\n index, opt_code = min(enumerate(answeroptions[code]), key=lambda t: levenshtein_distance(t[1]['code'], answercode))\n if levenshtein_distance(opt_code['code'], answercode) < 2:\n current_question.append(index)\n else:\n current_question.append(-1)\n current_result['questions'].append(current_question)\n\n result.append(current_result)\n return result\n\n\ndef print_questions():\n with open('question_and_answers.json') as input_file:\n question_and_answers = json.load(input_file)\n questions = []\n for qa in question_and_answers:\n questions.extend(qa['questions'])\n for question in questions:\n print(f'\"{question[\"text\"]}\"')\n if 'subquestions' in question.keys():\n print(', '.join(f'\"{sq[\"text\"]}\"' for sq in question['subquestions']))\n print(', '.join(f'\"{question[\"code\"]}[{sq[\"code\"]}]\"' for sq in question['subquestions']))\n else:\n print(f'\"{question[\"code\"]}\"')\n if 'answeroptions' in question.keys():\n print(', '.join(f'\"{ao[\"text\"]}\"' for ao in question['answeroptions']))\n print(', '.join(f'\"{ao[\"code\"]}\"' for ao in question['answeroptions']))\n print()\n\n\ndef setup_args():\n global SESSION_KEY, URL, USERNAME, PASSWORD\n p = configargparse.ArgumentParser(default_config_files=['config.ini'])\n p.add_argument('--api_url', required=True, env_var='LIMESURVEY_URL')\n p.add_argument('--username', required=True, env_var='LIMESURVEY_USERNAME')\n p.add_argument('--password', required=True, env_var='LIMESURVEY_PASSWORD')\n\n options = p.parse_args()\n\n URL = options.api_url\n 
USERNAME = options.username\n PASSWORD = options.password\n # logger.debug(\"Using URL {}\", URL)\n SESSION_KEY = get_session_key()\n\n\ndef main():\n setup_args()\n survey_id = 197925\n with open('question_and_answers.json', 'w') as file:\n json.dump(get_questions_with_answers(survey_id), file)\n\n\ndef new_main():\n setup_args()\n survey_id = 197925\n data = export_responses(survey_id, 'json')\n answers_string = str(convert_limesurvey(data, survey_id)).replace(\"'categories'\", \"categories\").replace(\"'questions'\", \"questions\")\n with open('limesurvey_data.js', 'w') as file:\n file.write(f\"export const limesurvey_answers = {answers_string};\")\n\n\nif __name__ == '__main__':\n new_main()\n","sub_path":"python/get_limesurvey_results.py","file_name":"get_limesurvey_results.py","file_ext":"py","file_size_in_byte":11002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"619107372","text":"#!/usr/bin/python3\r\nfrom socket import *\r\nfrom threading import Thread\r\nfrom chat import User\r\nfrom datetime import datetime\r\n\r\n\r\nclass ChatServer(Thread):\r\n def __init__(self, port, host=gethostname()):\r\n Thread.__init__(self)\r\n self.port, self.host = port, host\r\n self.s = socket(AF_INET, SOCK_STREAM)\r\n self.users = []\r\n\r\n self.s.bind((self.host, self.port))\r\n self.s.listen()\r\n\r\n self.users.append(User(self.host, self.port, 'server', 'no'))\r\n print('Server is online on port: ', self.port)\r\n\r\n # test\r\n self.connections = []\r\n\r\n def exit(self):\r\n self.s.close()\r\n\r\n def get_users(self):\r\n list_users = 'Users: ['\r\n for i in self.users:\r\n list_users += i.__str__() + '\\n'\r\n list_users += ']'\r\n return list_users\r\n\r\n def run_thread(self, conn, address):\r\n user = self.add_user(conn, address)\r\n print('User :', user, datetime.strftime(datetime.now(), '%H:%M:%S'))\r\n while True:\r\n data = conn.recv(4096).decode('utf-8')\r\n print(data)\r\n if data == '-get_users':\r\n conn.sendall(self.get_users().encode())\r\n msg = user.name + datetime.strftime(datetime.now(), '%H:%M: ') + data\r\n self.send_to_other_clients(msg, conn)\r\n conn.sendall(msg.encode())\r\n conn.close()\r\n\r\n def send_to_other_clients(self, msg, conn):\r\n for c in self.connections:\r\n if c != conn:\r\n c.sendall(msg.encode())\r\n print('Sent for', str(c))\r\n\r\n def add_user(self, conn, address):\r\n conn.send(b'user:')\r\n name = conn.recv(1024).decode('utf-8')\r\n conn.send(b'password:')\r\n password = conn.recv(1024).decode('utf-8')\r\n\r\n user = User(address[0], address[1], name, password)\r\n self.users.append(user)\r\n\r\n print(self.get_users())\r\n\r\n return user\r\n\r\n def run(self):\r\n print('Waiting for connections...')\r\n while True:\r\n conn, address = self.s.accept()\r\n print('Connected to login with: ', address[0], ':', address[1])\r\n self.connections.append(conn)\r\n 
Thread(target=self.run_thread, args=(conn, address)).start()\r\n\r\n\r\ndef main():\r\n server = ChatServer(8080)\r\n server.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"chat/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"359770833","text":"import os\nimport pathlib\nfrom datetime import datetime, timezone\nfrom os import getcwd\n\nimport requests\nimport yaml\n\nfrom prowler.lib.logger import logger\n\ntimestamp = datetime.today()\ntimestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)\nprowler_version = \"3.4.0\"\nhtml_logo_url = \"https://github.com/prowler-cloud/prowler/\"\nhtml_logo_img = \"https://user-images.githubusercontent.com/3985464/113734260-7ba06900-96fb-11eb-82bc-d4f68a1e2710.png\"\n\norange_color = \"\\033[38;5;208m\"\nbanner_color = \"\\033[1;92m\"\n\n# Compliance\nactual_directory = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))\ncompliance_aws_dir = f\"{actual_directory}/../compliance/aws\"\navailable_compliance_frameworks = []\nwith os.scandir(compliance_aws_dir) as files:\n files = [\n file.name\n for file in files\n if file.is_file()\n and file.name.endswith(\".json\")\n and available_compliance_frameworks.append(file.name.removesuffix(\".json\"))\n ]\n\n# AWS services-regions matrix json\naws_services_json_file = \"aws_regions_by_service.json\"\n\n# gcp_zones_json_file = \"gcp_zones.json\"\n\ndefault_output_directory = getcwd() + \"/output\"\n\noutput_file_timestamp = timestamp.strftime(\"%Y%m%d%H%M%S\")\ntimestamp_iso = timestamp.isoformat(sep=\" \", timespec=\"seconds\")\ncsv_file_suffix = \".csv\"\njson_file_suffix = \".json\"\njson_asff_file_suffix = \".asff.json\"\nhtml_file_suffix = \".html\"\nconfig_yaml = f\"{pathlib.Path(os.path.dirname(os.path.realpath(__file__)))}/config.yaml\"\n\n\ndef check_current_version():\n try:\n prowler_version_string = f\"Prowler {prowler_version}\"\n release_response = requests.get(\n \"https://api.github.com/repos/prowler-cloud/prowler/tags\"\n )\n latest_version = release_response.json()[0][\"name\"]\n if latest_version != prowler_version:\n return f\"{prowler_version_string} (latest is {latest_version}, upgrade for the latest features)\"\n else:\n return 
f\"{prowler_version_string} (it is the latest version, yay!)\"\n except Exception as error:\n logger.error(f\"{error.__class__.__name__}: {error}\")\n return f\"{prowler_version_string}\"\n\n\ndef change_config_var(variable, value):\n try:\n with open(config_yaml) as f:\n doc = yaml.safe_load(f)\n\n doc[variable] = value\n\n with open(config_yaml, \"w\") as f:\n yaml.dump(doc, f)\n except Exception as error:\n logger.error(f\"{error.__class__.__name__}: {error}\")\n\n\ndef get_config_var(variable):\n try:\n with open(config_yaml) as f:\n doc = yaml.safe_load(f)\n\n return doc[variable]\n except Exception as error:\n logger.error(f\"{error.__class__.__name__}: {error}\")\n return \"\"\n","sub_path":"prowler/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"477365790","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nComparison of FP16 and FP32 for the Marmousi-II model\n\"\"\"\nfrom urllib.request import urlretrieve\nimport tarfile\nimport numpy as np\nimport os\nimport sys\n\nimport segyio\nimport shutil\nfrom compare_accuracy import compare_accuracy\nfrom plot_comparison import plot_comparison\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nmpl.rcParams.update({'font.size': 7})\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\n\"\"\"\n _______________________Constants for modeling_____________________________\n\"\"\"\nf0 = 40 # Center frequency of the source\nsrcx = 7000 # x position of the source\ntmax = 5\n\n\"\"\"\n_______________________Download the velocity model_____________________________\n\"\"\"\nurl = \"https://s3.amazonaws.com/open.source.geoscience/open_data/elastic-marmousi/elastic-marmousi-model.tar.gz\"\nif not os.path.isfile(\"elastic-marmousi-model.tar.gz\"):\n urlretrieve(url, filename=\"elastic-marmousi-model.tar.gz\")\n tar = tarfile.open(\"elastic-marmousi-model.tar.gz\", \"r:gz\")\n tar.extractall()\n tar.close()\n\nmodels_segy = {\n 'vp': './elastic-marmousi-model/model/MODEL_P-WAVE_VELOCITY_1.25m.segy',\n 'vs': './elastic-marmousi-model/model/MODEL_S-WAVE_VELOCITY_1.25m.segy',\n 'rho': './elastic-marmousi-model/model/MODEL_DENSITY_1.25m.segy'}\n\nmodels_tar = {\n 'vp': './elastic-marmousi-model/model/MODEL_P-WAVE_VELOCITY_1.25m.segy.tar.gz',\n 'vs': './elastic-marmousi-model/model/MODEL_S-WAVE_VELOCITY_1.25m.segy.tar.gz',\n 'rho': './elastic-marmousi-model/model/MODEL_DENSITY_1.25m.segy.tar.gz'}\n\nmodels = {\n 'vp': None,\n 'vs': None,\n 'rho': None}\n\n\nfor par in models:\n if not os.path.isfile(models_segy[par]):\n tar = tarfile.open(models_tar[par], \"r:gz\")\n tar.extractall(path=\"./elastic-marmousi-model/model\")\n tar.close()\n with segyio.open(models_segy[par], \"r\", ignore_geometry=True) as segy:\n models[par] = 
[segy.trace[trid] for trid in range(segy.tracecount)]\n models[par] = np.transpose(np.array(models[par])[:, :])\n\n(NZ, NX) = models['rho'].shape\nNZ = int(NZ / 2) * 2\nNX = int(NX / 2) * 2\nfor par in models:\n models[par] = models[par][:NZ, :NX]\nmodels[\"rho\"] = models[\"rho\"] * 1000\ndh = 1.25\n\n\"\"\"\n_____________________________Plot models______________________________________\n\"\"\"\nfig, axs = plt.subplots(3, 1, figsize=(9 / 2.54, 13 / 2.54))\nims = {}\nunits = {'vp': 'm/s', 'vs': 'm/s', 'rho': 'kg/m$^3$'}\ntitles = {'vp': 'a)', 'vs': 'b)', 'rho': 'c)'}\nparams = ['vp', 'vs', 'rho']\nfor ii, par in enumerate(params):\n ims[par] = axs[ii].imshow(models[par] / 1000, interpolation='bilinear',\n extent=[0, (NX + 1) * dh / 1000 / 2,\n (NZ + 1) * dh / 1000, 0])\n axs[ii].set_xlabel('x (km)')\n axs[ii].set_ylabel('z (km)')\n axs[ii].set_title(titles[par])\n axs[ii].set_xticks(np.arange(0, 9, 0.5))\n axs[ii].set_yticks(np.arange(0, 4, 0.5))\n axs[ii].set_xticklabels([str(el) for el in np.arange(0, 18)])\n divider = make_axes_locatable(axs[ii])\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n clr = plt.colorbar(ims[par], cax=cax)\n cax.xaxis.tick_top()\n cax.set_xlabel(units[par], labelpad=8)\n cax.xaxis.set_label_position('top')\nplt.tight_layout()\nplt.savefig('marmousiII.eps', dpi=300)\n\n\"\"\"\n_____________________Perform the comparison ___________________________\n\"\"\"\ncompare_accuracy(\"marm2\", models, f0, srcx, tmax, dh)\n\n\"\"\"\n _____________________Plot the figure ___________________________\n\"\"\"\nplot_comparison(\"marm2\", 10, 20, 1, 16, 0.2)\n\n","sub_path":"Fig3_4_6_error/marmousi2.py","file_name":"marmousi2.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"415064733","text":"from bottle import *\nimport json\nimport os\nimport sys\nimport mysql.connector\n\npagePort = 18155\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"student\",\n passwd=\"fredfredburger\",\n database=\"chickadees\"\n\n)\nprint(\"**mydb is: \",mydb)\nprint(\"**database: \", mydb.database)\n\n@post('/api/visits')\ndef getJson():\n data = request.json\n payload = base64.b64decode(data['payload_raw'])\n #payloadTimestamp = data.metadata.gateways['timestamp']\n payloadTimestamp = data['metadata']['time']\n #payloadTimestamp = 'seven'\n dataParseTesting = data['metadata']\n '''\n # ** Testing Purposes **\n print(\"*** METADATA ***\")\n print(dataParseTesting)\n print(\"*** END-METADATA ***\")\n print(\"------------------------------------------\")\n print(\"*** TIME ***\")\n print(payloadTimestamp)\n print(\"*** END-TIME ***\")\n print(\"------------------------------------------\")\n print(\"*** DATA ***\")\n print(data) #print entire data packet. 
Including meta data\n print(\"*** END-DATA ***\")\n print(\"------------------------------------------\")\n '''\n \n print(\"**payload: \",payload)\n print(\"**Count: \",data['counter'])\n print(\"timestamp: \", payloadTimestamp)\n\n timestamp = (payload[10:20]).decode(\"utf-8\")\n rfid = (payload[0:10]).decode(\"utf-8\")\n print(\"*TIMESTAMP: \",timestamp)\n print(\"*RFID: \", rfid)\n\n insert_tuple = (rfid,timestamp)\n\n mycursor = mydb.cursor()\n\n #tmpInsert = \" INSERT INTO visits (rfid, visitTimestamp) VALUES (\" + rfid + \" , \" + timestamp + \") \"\n tmpSearch = \"Select * from visits where rfid = %s and visitTimestamp = %s\"\n #sqlInsert = \"\"\" INSERT INTO visits (rfid, feederID, visitTimestamp, temperature, mass, bandCombo, isSynced) VALUES ('011016C1B6', 'SHRM', '%s', '24', '24', 'g0/Y#', '0') \"\"\"\n \n searchRes = mycursor.execute(tmpSearch,(rfid,int(timestamp)))\n rowCount = mycursor.fetchone()\n if not rowCount:\n tmpInsert = \"INSERT INTO visits (rfid,feederID,visitTimestamp,temperature, mass, bandCombo, isSynced) VALUES (%s, 'CLIF', %s, 0, 0, '', 0)\"\n insertRes = mycursor.execute(tmpInsert,(rfid,int(timestamp)))\n mydb.commit() #uncomment to actually commit INSERT into DB.\n print(\"RowCount: \",rowCount)\n \n\n return data\n\n\nrun(host='euclid.nmu.edu', port=pagePort, debug=True)\n\n\n\n\"\"\"\n ** Test Tags **\n0700EDFC4A\n011016A32F\n\n\n ** JSON data **\n \n{\n 'app_id': 'production2019jan', \n 'dev_id': 'node1', \n 'hardware_serial': '0099DF663BAB7B4B', \n 'port': 1, \n 'counter': 0, \n 'confirmed': True, \n 'is_retry': True, \n 'payload_raw': 'MDExMDE2QTMyRg==', \n 'metadata':{\n 'time': '2019-03-20T00:45:16.959721306Z', \n 'frequency': 904.5, \n 'modulation': 'LORA', \n 'data_rate': 'SF10BW125', \n 'coding_rate': '4/5', \n 'gateways': [{\n 'gtw_id': 'eui-b827ebfffe11f166', \n 'timestamp': 176353300, # NEED THIS VALUE!!\n 'time': '2019-03-20T00:45:16.926417Z', # or this I guess...\n 'channel': 3, \n 'rssi': -114, \n 'snr': -15.2, \n 
'rf_chain': 0, \n 'latitude': 46.54527, \n 'longitude': -87.40362, \n 'location_source': 'registry'\n }]\n },\n 'downlink_url': 'https://integrations.thethingsnetwork.org/ttn-us-west/api/v2/down/production2019jan/euclid?key=ttn-account-v2.qnXQCj7ir6DDJ7-YwbF5qbnRQTWB4CG1RcqvQOSsmKM'\n}\n\n\n\n{'app_id': 'production2019jan', 'dev_id': 'node1', 'hardware_serial': '0099DF663BAB7B4B', 'port': 1, 'counter': 0, 'confirmed': True, 'is_retry': True, 'payload_raw': 'MDExMDE2QTMyRg==', 'metadata': {'time': '2019-03-20T00:45:16.959721306Z', 'frequency': 904.5, 'modulation': 'LORA', 'data_rate': 'SF10BW125', 'coding_rate': '4/5', 'gateways': [{'gtw_id': 'eui-b827ebfffe11f166', 'timestamp': 176353300, 'time': '2019-03-20T00:45:16.926417Z', 'channel': 3, 'rssi': -114, 'snr': -15.2, 'rf_chain': 0, 'latitude': 46.54527, 'longitude': -87.40362, 'location_source': 'registry'}]}, 'downlink_url': 'https://integrations.thethingsnetwork.org/ttn-us-west/api/v2/down/production2019jan/euclid?key=ttn-account-v2.qnXQCj7ir6DDJ7-YwbF5qbnRQTWB4CG1RcqvQOSsmKM'}\n\n\n\"\"\"\n","sub_path":"oldAPI.py","file_name":"oldAPI.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"11471918","text":"import operator\nimport pdb\nfrom copy import deepcopy\nfrom functools import reduce\nfrom typing import List, Dict, Any\n\nfrom mockfirestore import NotFound\nfrom mockfirestore._helpers import (\n Timestamp, Document, Store, get_by_path, set_by_path, delete_by_path, get_document_iterator,\n parse_field_path\n)\n\n\nclass DocumentSnapshot:\n def __init__(self, reference: 'DocumentReference', data: Document) -> None:\n self.reference = reference\n self._doc = deepcopy(data)\n\n @property\n def id(self):\n return self.reference.id\n\n @property\n def exists(self) -> bool:\n return self._doc != {}\n\n def to_dict(self) -> Document:\n return self._doc\n\n @property\n def create_time(self) -> Timestamp:\n timestamp = Timestamp.from_now()\n return timestamp\n\n @property\n def update_time(self) -> Timestamp:\n return self.create_time\n\n @property\n def read_time(self) -> Timestamp:\n timestamp = Timestamp.from_now()\n return timestamp\n\n def get(self, field_path: str) -> Any:\n if not self.exists:\n return None\n else:\n return reduce(operator.getitem, field_path.split('.'), self._doc)\n\n def _get_by_field_path(self, field_path: str) -> Any:\n try:\n return self.get(field_path)\n except KeyError:\n return None\n\n\nclass DocumentReference:\n def __init__(self, data: Store, path: List[str],\n parent: 'CollectionReference') -> None:\n self._data = data\n self._path = path\n self.parent = parent\n\n @property\n def id(self):\n return self._path[-1]\n\n def get(self) -> DocumentSnapshot:\n return DocumentSnapshot(self, get_by_path(self._data, self._path))\n\n def delete(self):\n delete_by_path(self._data, self._path)\n\n def set(self, data: Dict, merge=False):\n if merge:\n try:\n self.update(deepcopy(data))\n except NotFound:\n self.set(data)\n else:\n set_by_path(self._data, self._path, deepcopy(data))\n\n def update(self, data: Dict[str, Any]):\n document = get_by_path(self._data, self._path)\n if document == {}:\n raise NotFound('No 
document to update: {}'.format(self._path))\n\n _apply_transformations(document, deepcopy(data))\n\n def collection(self, name) -> 'CollectionReference':\n from mockfirestore.collection import CollectionReference\n document = get_by_path(self._data, self._path)\n new_path = self._path + [name]\n if name not in document:\n set_by_path(self._data, new_path, {})\n return CollectionReference(self._data, new_path, parent=self)\n\n\ndef _apply_transformations(document: Dict[str, Any], data: Dict[str, Any]):\n \"\"\"Handles special fields like INCREMENT.\"\"\"\n increments = {}\n arr_unions = {}\n arr_deletes = {}\n deletes = []\n\n for key, value in list(get_document_iterator(data)):\n if not value.__class__.__module__.startswith('google.cloud.firestore'):\n # Unfortunately, we can't use `isinstance` here because that would require\n # us to declare google-cloud-firestore as a dependency for this library.\n # However, it's somewhat strange that the mocked version of the library\n # requires the library itself, so we'll just leverage this heuristic as a\n # means of identifying it.\n #\n # Furthermore, we don't hardcode the full module name, since the original\n # library seems to use a thin shim to perform versioning. e.g. 
at the time\n # of writing, the full module name is `google.cloud.firestore_v1.transforms`,\n # and it can evolve to `firestore_v2` in the future.\n continue\n\n transformer = value.__class__.__name__\n if transformer == 'Increment':\n increments[key] = value.value\n elif transformer == 'ArrayUnion':\n arr_unions[key] = value.values\n elif transformer == 'ArrayRemove':\n arr_deletes[key] = value.values\n del data[key]\n elif transformer == 'Sentinel':\n if value.description == \"Value used to delete a field in a document.\":\n deletes.append(key)\n del data[key]\n\n # All other transformations can be applied as needed.\n # See #29 for tracking.\n \n def _update_data(new_values: dict, default: Any):\n for key, value in new_values.items():\n path = key.split('.')\n\n try:\n item = get_by_path(document, path)\n except (TypeError, KeyError):\n item = default\n\n set_by_path(data, path, item + value, create_nested=True)\n\n _update_data(increments, 0)\n _update_data(arr_unions, [])\n\n _apply_updates(document, data)\n _apply_deletes(document, deletes)\n _apply_arr_deletes(document, arr_deletes)\n\n\ndef _apply_deletes(document: Dict[str, Any], data: List[str]):\n for key in data:\n path = parse_field_path(str(key))\n delete_by_path(document, path)\n\n\ndef _apply_arr_deletes(document: Dict[str, Any], data: Dict[str, Any]):\n for key, values_to_delete in data.items():\n path = parse_field_path(str(key))\n try:\n value = get_by_path(document, path)\n except KeyError:\n continue\n for value_to_delete in values_to_delete:\n try:\n value.remove(value_to_delete)\n except ValueError:\n pass\n set_by_path(document, path, value)\n\n\ndef _apply_updates(document: Dict[str, Any], data: Dict[str, Any]):\n for key in list(data.keys()):\n path = parse_field_path(str(key))\n set_by_path(document, path, data[key], 
create_nested=True)\n","sub_path":"mockfirestore/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"267643857","text":"from messageSender import *\n\nsender = messageSender()\n\nsender.loadDefinedSurfaces(\"DEFAULT\")\ncur = sender.newCursor(0, 0.5, 0.5, \"prop\")\nlist = {}\nfor x in range(1,5):\n win = sender.newCanvas(x, 0, 1, 1, 1,\"prop\", \"mywin\")\n list[x] = win\n sender.newRectangle(win, 0, 1, 1, 1, \"prop\", (1,0,0,1), 10, (0,0,1,1))\n sender.newCircle(win, 0.5, 0.5, 0.25, \"prop\", (1,1,1,1), 10, (1,1,1,1), 10)\nfor x in range(1, 5):\n sender.newTexRectangle(list[x], 0.5, 1, 0.5, 0.5, \"prop\", \"checks.jpg\")\nlTor = True\ntTob = True\nsurfaceWidth = sender.getSurfacePixelWidth(0)\nsurfaceHeight = sender.getSurfacePixelWidth(0)\nwhile (True):\n if lTor == True:\n if tTob == True:\n sender.shiftCursor(cur, 5, -3)\n else:\n sender.shiftCursor(cur, 5, 3)\n else:\n if tTob == True:\n sender.shiftCursor(cur, -5, -3)\n else:\n sender.shiftCursor(cur, -5, 3)\n loc = sender.getCursorPosition(cur)\n if float(loc[0]) < 0:\n lTor = True\n elif float(loc[0]) > surfaceWidth:\n lTor = False\n if float(loc[1]) < 0:\n tTob = False\n elif float(loc[1]) > surfaceHeight:\n tTob = True","sub_path":"src/UnityTesting.py","file_name":"UnityTesting.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"131929484","text":"# -*- coding:utf-8 -*-\nimport argparse\nimport os\nimport cv2 as cv\nimport numpy as np\nfrom PIL import Image\n\nimport chainer\nfrom chainer import cuda\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer import reporter\n\n\ndef get_movie_filename(path, extention=\"mpg\"):\n \"\"\"\n get file_name which has specified extention (ex. mp4 )\n\n :param path: relative path\n :param extention:\n :return: file_names(full path)\n \"\"\"\n\n file_names = list(filter(lambda file_name: extention in file_name, os.listdir(path)))\n file_names = map(lambda file_name: os.getcwd() + \"/\" + path[2:] + \"/\" + file_name, file_names)\n\n return file_names\n\n\ndef stack(source, dist, module=np):\n \"\"\"\n\n :param source: source stacked nparray(or Variable)\n :param dist:\n :param module:\n :return: stacked nparray\n \"\"\"\n\n if source is None:\n source = dist\n else:\n source = module.vstack((source, dist))\n\n return source\n\n\ndef get_movies(full_paths, frame_count=300, size=(240, 320), dtype=np.float32):\n \"\"\"\n\n :param full_paths:\n :param frame_count:\n :return: nparray , shape = (movie_file , frame_count , RGB , width , height)\n \"\"\"\n\n movie_batches = None\n\n for file_name in full_paths:\n\n movie = cv.VideoCapture(file_name)\n movie_stack = None\n\n for frame in range(frame_count):\n try:\n ret, image = movie.read()\n image = cv.resize(image, size)\n image = np.asarray(image, dtype=dtype)\n image = image.transpose(2, 0, 1)\n movie_frame = np.expand_dims(image, axis=0)\n\n movie_stack = stack(movie_stack, movie_frame)\n\n except:\n print(\"something wrong (in get_movie)\")\n exit(0)\n\n movie_stack = np.expand_dims(movie_stack, axis=0)\n movie_batches = stack(movie_batches, movie_stack)\n\n # ( length , movie , RGB , height , width )\n movie_batches = movie_batches.transpose(1, 0, 2, 3, 4).astype(np.float32)\n\n return 
movie_batches\n\n\ndef make_movie(nparray_movie, file_name, fps):\n \"\"\"\n :param nparray: shape = (length ,3,width , height)\n :return:\n \"\"\"\n\n # fps = movie.get(cv.CAP_PROP_FPS)\n width = nparray_movie.shape[2]\n height = nparray_movie.shape[3]\n fourcc = cv.VideoWriter_fourcc('m', 'p', '4', 'v')\n file = open(file_name, \"wb\")\n out = cv.VideoWriter(file_name, int(fourcc), 24, (int(width), int(height)))\n for _image in nparray_movie:\n pil_image = Image.fromarray(_image.transpose(1, 2, 0).astype(np.uint8)[:, :, ::-1].copy())\n cv_image = cv.cvtColor(np.array(pil_image), cv.COLOR_RGB2BGR)\n\n out.write(cv.resize(cv_image, (width, height)))\n\n\ndef make_teacher_signal(nparray_movies):\n \"\"\"\n :param nparray_movie: ( length , movie , RGB , height , width )\n :return: TupleDataset ( t_frame , t+1_frame)\n \"\"\"\n\n input_movie = nparray_movies[:-1]\n teacher_movie = nparray_movies[1:]\n dataset = chainer.datasets.TupleDataset(input_movie, teacher_movie)\n\n return dataset\n\n\n# (batch , len , RGB , height , width )\n# (len , batch , RGB , height , width ) -> こっちの方が妥当っぽい\n\n# numpy を綺麗に画像化する\n# image_RGB = Image.fromarray(_image.transpose(1,2,0).astype(np.uint8)[: , : , ::-1].copy())\n\n\n\nclass MovieGen(chainer.Chain):\n def __init__(self, channels, shape):\n self.shape = shape[0] * shape[1] * 3 # RGB\n self.image_width = shape[0]\n self.image_height = shape[1]\n self.layers = len(channels)\n super(MovieGen, self).__init__()\n\n for nth in range(len(channels)):\n self.add_link('Linear_' + str(nth), L.Linear(out_size=channels[nth], in_size=None))\n\n self.accfun = F.accuracy\n self.lossfun = F.mean_squared_error\n self.compute_accuracy = None\n\n def __forward(self, h):\n\n \"\"\"\n inner function\n :param h: input image (3, width , height)\n :return: y\n \"\"\"\n\n for nth in range(self.layers - 1):\n h = F.tanh(getattr(self, 'Linear_' + str(nth))(h))\n self.y = getattr(self, 'Linear_' + str(self.layers - 1))(h)\n\n return self.y\n\n def 
__call__(self, *args):\n data = args[0]\n teacher = args[1]\n # (movie_len , batchsize[movie_] , 3, 20, 20)\n batchsize = data.shape[1]\n\n self.y = None\n self.loss = 0\n self.accuracy = None\n\n for i, (x, t) in enumerate(zip(data, teacher)):\n\n # expected x.data.shape = ( batch , RGB , height, width )\n\n # for 1 dim ( Linear )\n h = F.reshape(x, (batchsize, self.shape))\n t = F.reshape(t, (batchsize, self.shape))\n # for layers\n for nth in range(self.layers - 1):\n h = F.tanh(getattr(self, 'Linear_' + str(nth))(h))\n self.y = getattr(self, 'Linear_' + str(self.layers - 1))(h)\n\n # self.loss = self.lossfun(self.y, t)\n self.loss += F.mean_squared_error(self.y, t)\n\n reporter.report({'loss': self.loss}, self)\n # if self.compute_accuracy:\n # self.accuracy = self.accfun(self.y, t)\n # reporter.report({'accuracy': self.accuracy}, self)\n return self.loss\n\n def generate_movie(self, first_image, generate_len, movie_name=\"/Users/g329/deep_learning/trend/generated.mp4\"):\n \"\"\"\n if model has lstm , do reset_state !\n\n :param first_image: expeceted shape : 3,width , height\n :param generate_len:\n :param movie_name: this need full path\n :return:\n \"\"\"\n\n generated_image = np.zeros((generate_len, 3, self.image_width, self.image_height))\n previous = first_image\n for frame in range(generate_len - 1):\n # for layers\n h = F.reshape(previous, (1, self.shape))\n for nth in range(self.layers - 1):\n h = F.tanh(getattr(self, 'Linear_' + str(nth))(h))\n y = getattr(self, 'Linear_' + str(self.layers - 1))(h)\n generated_image[frame] = F.reshape(y, (3, self.image_width, self.image_height)).data\n\n print(generated_image.shape)\n make_movie(generated_image, file_name=movie_name, fps=24)\n print(\"generate end\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', '-g', default=-1, type=int, help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--epoch', '-e', default=10, type=int, help='epochs 
(default 50)')\n parser.add_argument('--channels', '-c', default=\"100 100 100\", type=str, help='epochs (default 50')\n parser.add_argument('--width', '-w', default=120, type=int, help='')\n parser.add_argument('--height', default=160, type=int, help='')\n parser.add_argument('--batchsize', default=10, type=int, help='batchsize of train and test')\n parser.add_argument('--movie_len', default=60, type=int, help='movie len (all)')\n parser.add_argument('--split_at', default=40, type=int, help='split point for train/test')\n\n args = parser.parse_args()\n size = (args.width, args.height)\n out_size = [size[0] * size[1] * 3]\n\n # parse channels\n args.channels = [int(channel) for channel in args.channels.split()] + out_size\n\n xp = cuda.cupy if args.gpu >= 0 else np\n\n size = (args.width, args.height)\n size = (40, 30)\n out_size = [size[0] * size[1] * 3]\n\n # list -> list\n model = MovieGen(channels=args.channels + out_size, shape=size)\n optimizer = chainer.optimizers.RMSprop()\n optimizer.setup(model)\n\n movie_len = 30 + 1 # want to predict(frame)\n args.split_at = 20\n file_names = get_movie_filename(\"./data\")\n # print(\"used movies : \" + \" \".join(file_names))\n print(\"movie loading...\")\n movies = get_movies(file_names, frame_count=movie_len, size=size)\n # movie = make_movie(movies[0], \"0_movie.mp4\",fps=24)\n\n\n data = make_teacher_signal(movies)\n\n train, test = chainer.datasets.split_dataset(dataset=data, split_at=args.split_at)\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False)\n\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=\"result\")\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 
'validation/main/loss']))\n trainer.extend(extensions.ProgressBar())\n\n print(\"train start\")\n trainer.run()\n print(\"train end\")\n\n print(\"generate start\")\n\n model.generate_movie(first_image=train[0][0][0], generate_len=60,\n movie_name=\"/Users/g329/deep_learning/trend/train_generated.mp4\")\n model.generate_movie(first_image=test[0][0][0], generate_len=60,\n movie_name=\"/Users/g329/deep_learning/trend/test_generated.mp4\")\n","sub_path":"tools/movie_utils.py","file_name":"movie_utils.py","file_ext":"py","file_size_in_byte":9111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"342090362","text":"# __author__: Stanley\n# date: 2018/3/13\n\n\n# 参考连接\n# http://www.runoob.com/python/python-built-in-functions.html\n# https://docs.python.org/3.5/library/functions.html\n\n# abs返回绝对值\n# print(abs(-333))\n\n\n# filter,过滤器的功能(可迭代对象)\ns = [1,2,3,4]\ndef func(c):\n if c != 3:\n return c\n\nprint(\"正常调用返回\",func(s))\nret = filter(func,s)\nprint(ret) # 返回的是一个迭代器\nprint(\"通过filter调用\",list(ret))\n\n# map 修改(可迭代对象)\ns = ['a1','b1','c1']\n\ndef func_map(s):\n return s + 'alvin'\n\nret = map(func_map,s)\nprint(ret)\nprint(list(ret))\n\n\n# reduce\nfrom functools import reduce\n\ndef add_reduce(x,y):\n return x+y\n\nprint(reduce(add_reduce,range(1,5))) # 结果就是一个数值。\n# 实现原理\n# [1,2,3,4,]\n# [3,3,4,]\n# [6,4]\n# 10\n\n# lambda 匿名函数\nnum = lambda a,b:a+b\nprint(num(1,2))\n\n# lambda配合reduce实现阶乘\nprint(reduce(lambda x,y:x*y,range(1,6)))","sub_path":"day15/内置函数.py","file_name":"内置函数.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"69254958","text":"# -*- coding: utf-8 -*-\nfrom postprocess.control_vars import *\nfrom postprocess import limit_state_data as lsd\nfrom postprocess.xcVtk import vtk_graphic_base\nfrom postprocess.xcVtk.FE_model import vtk_FE_graphic\nfrom postprocess.xcVtk.diagrams import control_var_diagram as cvd\n\nmodel_path=\"../\"\n#Project directory structure\nexec(open(model_path+'env_config.py').read())\n\nmodelDataInputFile=model_path+\"model_gen.py\" #data for FE model generation\nexec(open(modelDataInputFile).read())\n\n\n#Load properties to display:\nfName= cfg.projectDirTree.getVerifShearFile()\nexec(open(fName).read())\n\n\n\nlimitStateLabel= lsd.shearResistance.label\n\n\n#Available arguments: 'CF', 'N', 'My', 'Mz', 'Mu', 'Vy', 'Vz', 'theta', 'Vcu', 'Vsu', 'CF'\nargument= 'Vcu'\n\nsetDispRes=beamX #set of linear elements to which display results \n\n#setDisp=overallSet #set of elements (any type) to be displayed\nsetDisp=beamX\n\ndiagram= cvd.ControlVarDiagram(scaleFactor= 1,fUnitConv= 1,sets=[setDispRes],attributeName= limitStateLabel,component= argument)\ndiagram.addDiagram()\n\n\ndisplaySettings= vtk_FE_graphic.DisplaySettingsFE()\n #predefined view names: 'XYZPos','XNeg','XPos','YNeg','YPos',\n # 'ZNeg','ZPos' (defaults to 'XYZPos')\ndisplaySettings.cameraParameters= vtk_graphic_base.CameraParameters('YPos') #Point of view.\ndisplaySettings.setupGrid(setDisp)\ndisplaySettings.defineMeshScene(None,defFScale=0.0)\ndisplaySettings.appendDiagram(diagram) #Append diagram to the scene.\n\ncaption= cfg.capTexts[limitStateLabel] + ', ' + cfg.capTexts[argument] + '. '+ setDispRes.description.capitalize() + ', ' + 'Dir. 1'\ndisplaySettings.displayScene(caption)\n\n\n\n","sub_path":"ave_SR/voided/display/display_shearULS_beamEl.py","file_name":"display_shearULS_beamEl.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"194615539","text":"from Projector import Projector\nimport numpy as np\n\nclass RBF(Projector):\n \n def __init__(self,num_centers=np.array([5]),\n stdev=0.1,limits=np.array([[0,1],[0,1]]),\n randomize=False,normalize = True,bias=True):\n \n super(RBF,self).__init__(self,normalize,limits,bias) \n if not (type(num_centers) is np.ndarray):\n num_centers = np.array(num_centers)\n if num_centers.size == 1:\n num_centers = np.ones(limits.shape[0])*num_centers[0]\n self.stdev = stdev\n dim = []\n if randomize:\n #randomly spaced centers\n for d in range(limits.shape[0]):\n dim.append(np.sort(np.random.rand(num_centers[d])))\n else: \n #equally spaced centers\n for d in range(limits.shape[0]):\n dim.append(np.linspace(0,1,num_centers[d]))\n if len(dim) == 1:\n self.centers=dim[0].flatten()\n else:\n grid = np.meshgrid(*dim)\n self.centers=grid[0].flatten()\n for d in range(1,len(grid)):\n self.centers = np.c_[self.centers,grid[d].flatten()]\n\n \n def num_features(self):\n return (self.centers.shape[0]+int(self.bias))\n \n def phi(self,state):\n if len(self.centers.shape)==1:\n dists = self.centers-self.normalize_state(state)\n else:\n dists = np.linalg.norm(self.centers-\n self.normalize_state(state),axis=1)\n res = np.exp(-0.5 * dists**2 / self.stdev**2)\n \n return res","sub_path":"src/features/projectors/RBF.py","file_name":"RBF.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"83309801","text":"import openpyxl\nimport sys\nimport os\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter.filedialog import askopenfilename\nimport csv\n\n\nroot = Tk( )\nnamelabels = []\nnames = []\n\n#This is where we lauch the file manager bar.\ndef OpenFile():\n name = askopenfilename(initialdir=\"C:/Users/Batman/Documents/Programming/tkinter/\",\n filetypes =((\"Excel File\", \"*.xlsx\"),(\"All Files\",\"*.*\")),\n title = \"Choose a file.\"\n )\n print (name)\n \n namelabel = ttk.Label(root, text = name + \" has been added.\",foreground=\"Green\",font=(\"Helvetica\",18))\n names.append(name)\n namelabels.append(namelabel)\n nl_len = len(namelabels)\n namelabels[nl_len-1].pack()\n \n \ndef Generate():\n wb = openpyxl.Workbook()\n sheet = wb.active\n tracker = 0\n tracked = False\n source_list = []\n for name in names:\n wb_source = openpyxl.load_workbook(name)\n ws_source = wb_source.active\n source_list.append(ws_source)\n \n for source in source_list:\n if(tracked):\n for row in range(2,source.max_row+1):\n tracker+=1\n for cell in range(1,source.max_column+1):\n sheet.cell(row=tracker,column=cell).value = source.cell(row=row,column=cell).value\n else:\n for row in range(1,source.max_row+1):\n tracker+=1\n for cell in range(1,source.max_column+1):\n sheet.cell(row=tracker,column=cell).value = source.cell(row=row,column=cell).value\n tracked=True\n \n # wb.save(\"C:\\\\Users\\\\cpearson\\\\Documents\\\\python_excel\\\\merged.xlsx\")\n wb.save()\n label = ttk.Label(root, text =\"File Generated!\",foreground=\"Red\",font=(\"Helvetica\", 18))\n label.pack()\n'''\ndef removeItem(namelabels):\n namelabels[len(namelabels)-1].pack_forget()\n if(len(namelabels)>1):\n namelabels = namelabels[:-1]\n\n\n'''\nTitle = root.title( \"Excel File Merger\")\nlabel = ttk.Label(root, text =\"Open excel files by pressing file at the top left\",foreground=\"blue\",font=(\"Helvetica\", 18))\nlabel.pack()\n \n#Menu Bar\n\nmenu = 
Menu(root)\nroot.config(menu=menu)\n\nfile = Menu(menu)\n\n# lambda: namelabels[len(namelabels)-1].pack_forget()\n\nfile.add_command(label = 'Open', command = OpenFile)\nfile.add_command(label = 'Run', command = Generate)\nfile.add_command(label = 'Reset', command = lambda:os.execl(sys.executable,sys.executable,* sys.argv))\nfile.add_command(label = 'Exit', command = lambda:exit())\n\nmenu.add_cascade(label = 'File', menu = file)\n\nroot.mainloop()\n\n##try:\n## with open(name,'r') as UseFile:\n## print(UseFile.read())\n## except:\n## print(\"No file exists\")\n","sub_path":"excel_merger.py","file_name":"excel_merger.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"163742595","text":"#coding: utf-8\n\nfrom django.conf.urls import patterns, url\nfrom .views import PostDetailView, PostListView\n\n\napp_name = 'blog'\nurlpatterns = [\n url(r'^$', PostListView.as_view(), name='list'),\n url(r'^(?P[0-9]+)/$', PostDetailView.as_view(), name='detail'), #name='details'??\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"276433733","text":"class Database:\n def __init__(self):\n self.data = 1\n\ndatabase = None\n\n\ndef initialize_database():\n # global 声明模块级别变量,防止函数外变量失效\n global database\n database = Database().data\n\n# 单独运行本脚本时启用\nif __name__ == \"__main__\":\n initialize_database()\n print(\"__main__\")\nelse:\n print(__name__)\n\n# 别的模块引用时 __name__ 变量会变为模块名\nif __name__ == \"ecommerce.database\":\n print(\"this module is quoted\")\n","sub_path":"ecommerce/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"227970547","text":"# -*- coding: utf-8 -*-\nimport time\nimport json\nimport requests\nimport socket\nfrom struct import pack\n\nimport urllib, datetime\nfrom xml.etree import ElementTree as ET\n\n\nclass API:\n def __init__(self, **kwargs):\n # Initialized common attributes\n self.variables = kwargs\n self.debug = True\n self.set_variable('offline_count', 0)\n self.set_variable('connection_renew_interval', 6000)\n\n def renewConnection(self):\n pass\n\n def set_variable(self, k, v): # k=key, v=value\n self.variables[k] = v\n\n def get_variable(self, k):\n return self.variables.get(k, None) # default of get_variable is none\n\n '''\n Attributes:\n ------------------------------------------------------------------------------------------\n label GET label in string\n status GET status\n unitTime GET time\n type GET type \n ------------------------------------------------------------------------------------------\n '''\n\n '''\n API3 available methods:\n 1. getDeviceStatus() GET\n 2. 
setDeviceStatus() SET\n '''\n\n # ----------------------------------------------------------------------\n # getDeviceStatus(), printDeviceStatus()\n def getDeviceStatus(self):\n\n getDeviceStatusResult = True\n\n try:\n print(\"Get Status eGauge Power Meter\")\n\n # Get XML from eGauge device\n url = \"http://\" + self.get_variable(\"bearer\") + \".egaug.es/cgi-bin/egauge?noteam\"\n\n # Parse the results\n raw_data = ET.parse(urllib.urlopen(url)).getroot()\n print(raw_data)\n\n self.getDeviceStatusJson(raw_data)\n self.printDeviceStatus()\n\n if getDeviceStatusResult==True:\n self.set_variable('offline_count', 0)\n else:\n self.set_variable('offline_count', self.get_variable('offline_count')+1)\n except Exception as er:\n print (er)\n print('ERROR: classAPI_Egauge_PowerMeter failed to getDeviceStatus')\n\n def getDeviceStatusJson(self, data):\n\n # conve_json = json.loads(data)\n print(data)\n\n # self.set_variable('device_label', str(conve_json[\"label\"]))\n # self.set_variable('device_type', str(conve_json[\"type\"]).upper())\n # self.set_variable('unitTime', str(conve_json[\"unitTime\"]))\n # self.set_variable('status', str(conve_json[\"contact\"]).upper())\n\n def printDeviceStatus(self):\n # now we can access the contents of the JSON like any other Python object\n print(\" the current status is as follows:\")\n # print(\" label = {}\".format(self.get_variable('label')))\n\n # ----------------------------------------------------------------------\n\n\n# This main method will not be executed when this class is used as a module\ndef main():\n # -------------Kittchen----------------\n meter = API(model='eGauge', api='API3', agent_id='05EGA010101', types='powermeter', device='egauge50040',\n ip='192.168.1.8', port=82)\n\n meter.getDeviceStatus()\n time.sleep(3)\n\n\nif __name__ == \"__main__\": 
main()\n","sub_path":"EgaugeMeterAgent/egaugemeteragent/extension/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"40413488","text":"import tensorflow as tf\nimport os, sys\n\ndn = os.path.dirname\nabs_path = os.path.abspath(__file__)\nsys.path.append(dn(abs_path))\nsys.path.append(dn(dn(abs_path)))\ndel dn, abs_path\n\nfrom tframe import console\nfrom tframe import FLAGS\n\nfrom signals.utils.dataset import load_wiener_hammerstein, DataSet\n\nimport lott_lib\n\n\ndef main(_):\n console.start('Lottery')\n\n # Configurations\n MARK = 'mlp00'\n MEMORY_DEPTH = 80\n coe = 8\n HIDDEN_DIM = MEMORY_DEPTH * coe\n\n EPOCH = 500\n LR = 0.000058\n BATCH_SIZE = 32\n PRINT_CYCLE = 10\n BRANCH_INDEX = 1\n FIX_PRE_WEIGHT = True\n ACTIVATION = 'relu'\n\n # FLAGS.train = False\n FLAGS.overwrite = True and BRANCH_INDEX == 0\n FLAGS.smart_train = True\n FLAGS.save_best = True and BRANCH_INDEX > 0\n FLAGS.summary = True\n # FLAGS.save_model = False\n FLAGS.snapshot = False\n FLAGS.epoch_tol = 50\n\n # Load data\n train_set, val_set, test_set = load_wiener_hammerstein(\n r'../data/wiener_hammerstein/whb.tfd', depth=MEMORY_DEPTH)\n assert isinstance(train_set, DataSet)\n assert isinstance(val_set, DataSet)\n assert isinstance(test_set, DataSet)\n\n # Get model\n model = lott_lib.mlp00(MARK, MEMORY_DEPTH, HIDDEN_DIM, LR, ACTIVATION)\n\n branch_1_weights = 'FeedforwardNet/branch/linear/weights:0'\n branch_1_bias = 'FeedforwardNet/branch/linear/biases:0'\n branch_2_weights = 'FeedforwardNet/branch2/linear/weights:0'\n branch_2_bias = 'FeedforwardNet/branch2/linear/biases:0'\n # model.nn.variable_assign(branch_1_weights, branch_2_weights)\n # model.nn.variable_assign(branch_1_bias, branch_2_bias)\n with model.nn._graph.as_default():\n variables = tf.trainable_variables()\n b = 1\n # print(model.nn._session.run(variables[2]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[6]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[3]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # 
print(model.nn._session.run(variables[7]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[4]))\n # print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # print(model.nn._session.run(variables[5]))\n # a = 1\n\n\n # Train or evaluate\n if FLAGS.train:\n model.identify(train_set, val_set, batch_size=BATCH_SIZE,\n print_cycle=PRINT_CYCLE, epoch=EPOCH,\n branch_index=BRANCH_INDEX, freeze=FIX_PRE_WEIGHT)\n else:\n BRANCH_INDEX = 1\n model.evaluate(train_set, start_at=MEMORY_DEPTH, branch_index=BRANCH_INDEX)\n model.evaluate(val_set, start_at=MEMORY_DEPTH, branch_index=BRANCH_INDEX)\n model.evaluate(test_set, start_at=MEMORY_DEPTH, branch_index=BRANCH_INDEX)\n\n console.end()\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"lottery/lott_script.py","file_name":"lott_script.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"600601835","text":"#Utilizando listas faça um programa que faça 5 perguntas para uma pessoa\n#sobre um crime. As perguntas são:\n#\"Telefonou para a vítima?\"\n#\"Esteve no local do crime?\"\n#\"Mora perto da vítima?\"\n#\"Devia para a vítima?\"\n#\"Já trabalhou com a vítima?\"\n#O programa deve no final emitir uma classificação sobre a\n#participação da pessoa no crime. Se a pessoa responder\n#positivamente a 2 questões ela deve ser classificada como \"Suspeita\",\n#entre 3 e 4 como \"Cúmplice\" e 5 como \"Assassino\".\n#Caso contrário, ele será classificado como \"Inocente\".\n\nresposta = [\"sim\",\"não\"]\nclassi = 0 \npergunta1 = str(input(\"Telefonou para vitimia? \"))\nif pergunta1 == resposta[0]:\n classi += 1\nelse :\n classi = 0\npergunta2 = str(input(\"Esteve no local do crime? \"))\nif pergunta2 == resposta[0]:\n classi += 1\npergunta3 = str(input(\"Mora perto da vitima? \"))\nif pergunta3 == resposta[0]:\n classi += 1\npergunta4 = str(input(\"Devia para vitima? \"))\nif pergunta3 == resposta[0]:\n classi += 1\npergunta5 = str(input(\"Já trabalhou com a vitima? \"))\nif pergunta3 == resposta[0]:\n classi += 1\nif classi == 2 :\n print(\"Você é suspeito\")\nelif classi == 3 or classi == 4 :\n print(\"Você foi cúmplice\")\nelif classi == 5 :\n print(\"Você é o criminoso\")\nelif classi == 0 :\n print(\"Você é inocente\")\n","sub_path":"Python-exercicios/questao14.py","file_name":"questao14.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"410488554","text":"class Solution:\n # Your task is to complete this function\n # Function should return an integer\n def countPS(self,s):\n n = len(s)\n dp = [[0]*n for _ in range(n)]\n \n for i in range(n):\n for j in range(i, -1, -1):\n if i == j:\n dp[j][i] = 1\n else:\n if s[i] == s[j]:\n dp[j][i] = dp[j+1][i] + dp[j][i-1] + 1\n else:\n dp[j][i] = dp[j+1][i] + dp[j][i-1] - dp[j+1][i-1]\n return dp[0][n-1]%(10**9+7)\n\n\n#{ \n # Driver Code Starts\n#Initial template for Python 3\n\nimport sys\nsys.setrecursionlimit(10**6)\n\nif __name__=='__main__':\n t = int(input())\n for i in range(t):\n ob=Solution()\n print(ob.countPS(input().strip()))\n\n# } Driver Code Ends","sub_path":"GeeksForGeeks/Practice/arrays/count_palindrome_subsequence.py","file_name":"count_palindrome_subsequence.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"86222962","text":"from crispy_forms.layout import Div\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext as _\nfrom geotrek.common.forms import CommonForm\nfrom geotrek.outdoor.models import Site, Course, OrderedCourseChild\n\n\nclass SiteForm(CommonForm):\n orientation = forms.MultipleChoiceField(choices=Site.ORIENTATION_CHOICES, required=False)\n wind = forms.MultipleChoiceField(choices=Site.ORIENTATION_CHOICES, required=False)\n\n geomfields = ['geom']\n\n fieldslayout = [\n Div(\n 'structure',\n 'name',\n 'parent',\n 'review',\n 'published',\n 'practice',\n 'type',\n 'description_teaser',\n 'ambiance',\n 'description',\n 'advice',\n 'period',\n 'orientation',\n 'wind',\n 'labels',\n 'themes',\n 'information_desks',\n 'web_links',\n 'portal',\n 'source',\n 'managers',\n 'eid',\n )\n ]\n\n class Meta:\n fields = ['geom', 'structure', 'name', 'review', 'published', 'practice', 'description',\n 'description_teaser', 'ambiance', 'advice', 'period', 'labels', 'themes',\n 'portal', 'source', 'information_desks', 'web_links', 'type', 'parent', 'eid',\n 'orientation', 'wind', 'managers']\n model = Site\n\n def __init__(self, site=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['parent'].initial = site\n if self.instance.pk:\n descendants = self.instance.get_descendants(include_self=True).values_list('pk', flat=True)\n self.fields['parent'].queryset = Site.objects.exclude(pk__in=descendants)\n if self.instance.practice:\n for scale in self.instance.practice.rating_scales.all():\n for bound in ('max', 'min'):\n ratings = getattr(self.instance, 'ratings_' + bound).filter(scale=scale)\n fieldname = 'rating_scale_{}{}'.format(bound, scale.pk)\n self.fields[fieldname] = forms.ModelChoiceField(\n label=\"{} {}\".format(scale.name, bound),\n queryset=scale.ratings.all(),\n required=False,\n initial=ratings[0] if ratings else None\n )\n self.fieldslayout[0].insert(10, 
fieldname)\n\n def save(self, *args, **kwargs):\n site = super().save(self, *args, **kwargs)\n\n # Save ratings\n if site.practice:\n for bound in ('min', 'max'):\n field = getattr(site, 'ratings_' + bound)\n to_remove = list(field.exclude(scale__practice=site.practice).values_list('pk', flat=True))\n to_add = []\n for scale in site.practice.rating_scales.all():\n rating = self.cleaned_data.get('rating_scale_{}{}'.format(bound, scale.pk))\n if rating:\n to_remove += list(field.filter(scale=scale).exclude(pk=rating.pk).values_list('pk', flat=True))\n to_add.append(rating.pk)\n else:\n to_remove += list(field.filter(scale=scale).values_list('pk', flat=True))\n field.remove(*to_remove)\n field.add(*to_add)\n\n return site\n\n\nclass CourseForm(CommonForm):\n children_course = forms.ModelMultipleChoiceField(label=_(\"Children\"),\n queryset=Course.objects.all(), required=False,\n help_text=_(\"Select children in order\"))\n hidden_ordered_children = forms.CharField(label=_(\"Hidden ordered children\"),\n widget=forms.widgets.HiddenInput(),\n required=False)\n\n geomfields = ['geom']\n\n fieldslayout = [\n Div(\n 'structure',\n 'name',\n 'site',\n 'review',\n 'published',\n 'description',\n 'advice',\n 'equipment',\n 'height',\n 'children_course',\n 'eid',\n 'hidden_ordered_children',\n )\n ]\n\n class Meta:\n fields = ['geom', 'structure', 'name', 'site', 'review', 'published', 'description',\n 'advice', 'equipment', 'height', 'eid', 'children_course', 'hidden_ordered_children']\n model = Course\n\n def __init__(self, site=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['site'].initial = site\n if self.instance.pk and self.instance.site and self.instance.site.practice:\n for scale in self.instance.site.practice.rating_scales.all():\n ratings = self.instance.ratings.filter(scale=scale)\n fieldname = 'rating_scale_{}'.format(scale.pk)\n self.fields[fieldname] = forms.ModelChoiceField(\n label=scale.name,\n queryset=scale.ratings.all(),\n 
required=False,\n initial=ratings[0] if ratings else None\n )\n self.fieldslayout[0].insert(5, fieldname)\n if self.instance:\n queryset_children = OrderedCourseChild.objects.filter(parent__id=self.instance.pk).order_by('order')\n # init multiple children field with data\n self.fields['children_course'].queryset = Course.objects.exclude(pk=self.instance.pk)\n self.fields['children_course'].initial = [c.child.pk for c in self.instance.course_children.all()]\n # init hidden field with children order\n self.fields['hidden_ordered_children'].initial = \",\".join(str(x) for x in queryset_children.values_list('child__id', flat=True))\n\n def clean_children_course(self):\n \"\"\"\n Check the course is not parent and child at the same time\n \"\"\"\n children = self.cleaned_data['children_course']\n if children and self.instance and self.instance.course_parents.exists():\n raise ValidationError(_(\"Cannot add children because this course is itself a child.\"))\n for child in children:\n if child.course_children.exists():\n raise ValidationError(_(\"Cannot use parent course {name} as a child course.\".format(name=child.name)))\n return children\n\n def save(self, *args, **kwargs):\n course = super().save(self, *args, **kwargs)\n\n # Save ratings\n if course.site and course.site.practice:\n to_remove = list(course.ratings.exclude(scale__practice=course.site.practice).values_list('pk', flat=True))\n to_add = []\n for scale in course.site.practice.rating_scales.all():\n rating = self.cleaned_data.get('rating_scale_{}'.format(scale.pk))\n if rating:\n to_remove += list(course.ratings.filter(scale=scale).exclude(pk=rating.pk).values_list('pk', flat=True))\n to_add.append(rating.pk)\n else:\n to_remove += list(course.ratings.filter(scale=scale).values_list('pk', flat=True))\n course.ratings.remove(*to_remove)\n course.ratings.add(*to_add)\n\n # Save children\n ordering = []\n if self.cleaned_data['hidden_ordered_children']:\n ordering = 
self.cleaned_data['hidden_ordered_children'].split(',')\n order = 0\n # add and update\n for value in ordering:\n child, created = OrderedCourseChild.objects.get_or_create(parent=self.instance,\n child=Course.objects.get(pk=value))\n child.order = order\n child.save()\n order += 1\n # delete\n new_list_children = self.cleaned_data['children_course'].values_list('pk', flat=True)\n for child_relation in self.instance.course_children.all():\n # if existant child not in selection, deletion\n if child_relation.child_id not in new_list_children:\n child_relation.delete()\n\n return course\n","sub_path":"geotrek/outdoor/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"302367399","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n#This program is built to simulate the trajectories of atomic/molecular\r\n# beams through irregular channels while having the chance to be \r\n#adsorbed by the side wall and reemitted in a new direction with a new\r\n#velocity\r\n\r\nh=100\r\nw=100\r\nNum=100\r\nT=273\r\ntimesteps=5000\r\n\r\ncoords=np.zeros((timesteps,2))\r\n\r\n\r\nBC=np.ones((h,w))\r\nBC1=np.ones((h,w))\r\nn=np.ones((h,w,2))\r\n\r\n\r\ndef create_path(x_start,x_finish,y_start,y_finish,val):\r\n\tlx=(x_finish-x_start)\r\n\tly=(y_finish-y_start)\r\n\tls=math.sqrt(lx**2+ly**2)\r\n\tslope=ly/(0.000001+lx)\r\n\tif lx>ly:\r\n\t\tfor i in range(lx):\r\n\t\t\tBC[int(y_start+(i*slope)//1)][i+int(x_start)]=val\r\n\t\t\tBC1[int(y_start+(i*slope)//1)][i+int(x_start)]=val\r\n\telse:\r\n\t\tfor j in range(ly):\r\n\t\t\tBC[int(y_start+j)][int(x_start+(j/slope)//1)]=val\r\n\t\t\tBC1[int(y_start+j)][int(x_start+(j/slope)//1)]=val\r\n\r\n\r\nfor j in range(10):\r\n\tcreate_path(5,20,j,j,0)\r\nfor j in range(10):\r\n\tcreate_path(5+j,5+j,10,90,0)\r\nfor j in range(10):\r\n\tcreate_path(5,80,90-j,90-j,0)\r\nfor j in range(10):\r\n\tcreate_path(75+j,75+j,10,90,0)\r\nfor j in range(10):\r\n\tcreate_path(10,75+j,10,10+j,0)\r\n\r\ndef MBDist(T):\r\n\treturn np.random.random()\r\n\r\nclass Particle:\r\n\tdef __init__(self,T):\r\n\t\tself.temp=T\r\n\t\tself.v=np.random.rand()\r\n\t\tself.theta=math.pi/2.8\r\n\t\tself.x=7\r\n\t\tself.y=7\r\n\t\tself.vx=0.11*math.cos(self.theta)\r\n\t\tself.vy=0.11*math.sin(self.theta)\r\n\r\nRb=Particle(10)\r\n\r\n#To propagate a particle into the channel, it is necessary to rescale the \r\n# motion the characteristic cell size i.e. 1. Thus, the larger value of the\r\n#x or y momentum is rescaled to a box and the other is corresponding #sine(theta) or cosine(theta). 
This allows the particle to move in increments #of the BC\r\n\r\ndef Reflect(d):\r\n\tif d==\"y\":\r\n\t\tRb.vy*=-1\r\n\t\tprint(\"reflected y!\")\r\n\tif d==\"x\":\r\n\t\tRb.vx*=-1\r\n\t\tprint(\"reflected x!\")\r\n\t\r\ndef calc_norm_vectors():\r\n\tfor i in range(h):\r\n\t\tfor j in range(w):\r\n\t\t\tif BC[i][j]==0:\r\n\t\t\t\tif BC[i+1][j]==1:\r\n\t\t\t\t\tn[i+1][j]=[0,1]\r\n\t\t\t\tif BC[i-1][j]==1:\r\n\t\t\t\t\tn[i-1][j]=[0,1]\r\n\t\t\t\tif BC[i][j+1]==1:\r\n\t\t\t\t\tn[i][j+1]=[1,0]\r\n\t\t\t\tif BC[i][j-1]==1:\r\n\t\t\t\t\tn[i][j-1]=[1,0]\r\n\t#for i in range(h-1):\r\n\t#\tfor j in range(w-1):\r\n\t#\t\tif BC[i][j]==1:\r\n\t#\t\t\tif (n[i][j][0])**2+(n[i][j][1])**2>0:\r\n\t#\t\t\t\tnavgx=0\r\n\t#\t\t\t\tnavgy=0\r\n\t#\t\t\t\tnorm=0\r\n\t#\t\t\t\tnavgx+=n[i][j-1][0]\r\n\t#\t\t\t\tnavgy+=n[i][j-1][1]\r\n\t#\t\t\t\tnavgx+=n[i][j+1][0]\r\n\t#\t\t\t\tnavgy+=n[i][j+1][1]\t\t\t\t\t\r\n\t#\t\t\t\tnavgx+=n[i+1][j][0]\r\n\t#\t\t\t\tnavgy+=n[i+1][j][1]\r\n\t#\t\t\t\tnavgx+=n[i-1][j][0]\r\n\t#\t\t\t\tnavgy+=n[i-1][j][1]\r\n\t#\t\t\t\tnormn=navgx**2+navgy**2\r\n\t#\t\t\t\tn[i][j]=[navgx/normn,navgy/norm]\r\n\r\ncalc_norm_vectors()\r\n\r\ndef propagator():\r\n\tcount=0\r\n\t#while Rb.x|<|!|\\.|@|#|\\$|\\*|:|%|\\+|…|\\\\\\\\|\\/|«|»|···|\\||\\•|\\?|\\(|\\)|=|-|&|;|\\_|—|~|¯|\\{|\\}|\\[|\\]|£|€|¥|¿|–\", \"\", line)\n line = re.sub(\"\\“|\\”|\\‘|\\’|\\\"|,|'\", \" \", line)\n line = re.sub(\"[0-9]+|http[a-zA-Z0-9]+\", \" \", line)\n line = line.lower()\n line = re.sub(\" [a-z] |aa+[a-z]* | ab | aba | abc | ac | acc | acq | az | ba | baa* | ca | czq | czt | da | daca | ec | ed | rt | co \", \" \", line)\n line = re.sub(\" amp | get | pi | marc | someon | talking | speaking | ever | done | less \", \" \", line)\n \n stop_words = (stopwords.words('english'))\n bad_words = ['if','who', 'would', 'the', 'are', 'said', 'i', 'in', 'it', 'a', 'u', 'm', 're', \n 'them', 'they', 'there', 'should', 'over', 'an', 'via', 'up', 'at', 'is', 'as',\n 'was', 'him', 'he', 'can', 'did', 'go', 
'by', 'us', 'our', 'their', 'or', 'how', \n 'now', 'but', 'give', 'my', 'so', 'be', 'out', 'its', 'and', 'any', 'all', 'got', 'then', 'you',\n 'these', 'say', 'on', 'not', 'some', 'me', 'those', 'to', 'of', 'for', 'we', \n 'why', 'like','https','with','from','para','more','this','when','just','about',\n 'that','what','have','will','your', 'must', 'which', 'pathfinder', 'vbtn', 'msfppreload', 'msfpnav',\n 'also', 'united', 'states', 'color','vallejo', 'cyndi', 'service', 'last', 'same', 'mind', 'fl',\n 'msfphover', 'hippo', 'quinny', 'dreami', 'many', 'even', 'cenicola', 'than',\n 'whether', 'office', 'read']\n \n word_tokens = word_tokenize(line)\n word_tokens = [w.lower() for w in word_tokens]\n\n filtered_sentence = [w for w in word_tokens if not w in stop_words] \n filtered_sentence = [w for w in word_tokens if not w in bad_words]\n\n# for word in filtered_sentence:\n# print('%s\\t%s' % (word, \"1\"))\n \n for word in coocc(filtered_sentence):\n print('%s %s\\t%s' % (word[0],word[1], \"1\"))","sub_path":"part3/Twitter/Code/Data/mapper_coocc.py","file_name":"mapper_coocc.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"442160863","text":"# encoding=utf-8\n# coding=utf-8\nimport requests\nimport re\nimport time\nfrom bs4 import BeautifulSoup\nimport random\nimport datetime\nimport os\nimport json\n\nimport sys\n\nheaders = [\n {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n },\n {\n 'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\n }\n]\n\nday = datetime.datetime.now().strftime('%Y%m%d')\nlog_file = day+\"_log.txt\"\nrpc_url = \"http://localhost:6800/jsonrpc\"\n\n# 网站根目录的url\nroot_url = 'https://18comic.one'\nindex_url = 'https://18comic.one/albums/doujin?o=tf'\nlist_prefix = \"\"\ntotalPageNumber = 0\nimages_pattern = ''\nimage_pattern = ''\n\nscript_tmp_path = os.getcwd()\nscript_tmp_name = \"tmp.html\"\n\n# 保存图片的根路径\nbase_save_dir = 'D:\\\\manhua\\\\'\n\n# 日志处理\ndef log(value,print_flag = True):\n logfile = open(log_file, 'a', encoding='utf-8')\n if logfile.writable():\n now_data = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n log_message = \"时间:{} : log : {}\\n\".format(now_data, value)\n if print_flag:\n print(log_message)\n logfile.write(log_message)\n try:\n logfile.close()\n except IOError:\n print(\"写入日志错误\")\n else:\n return\n\n\ndef addDownloadTask(url,dir,out):\n postdata = {\n \"jsonrpc\": \"2.0\",\n \"id\": \"QXJpYU5nXzE1NDgzODg5MzhfMC4xMTYyODI2OTExMzMxMzczOA==\"\n }\n rpc_request = postdata\n rpc_request[\"method\"] = \"aria2.addUri\"\n # rpc 的选项,去掉--就可以了\n options = {\n \"dir\":dir,\n \"out\":out,\n \"allow-overwrite\":\"true\"\n }\n rpc_request[\"params\"] = [[url],options]\n response = requests.post(url=rpc_url, json=rpc_request)\n if response.status_code == 200:\n result = response.json().get(\"result\", [])\n print(\"gid: {}\".format(result))\n return result\n else:\n log(\"无法调用aria2\")\n\ndef download_status(gid):\n postdata = {\n \"jsonrpc\": \"2.0\",\n \"id\": 
\"QXJpYU5nXzE1NDgzODg5MzhfMC4xMTYyODI2OTExMzMxMzczOA==\"\n }\n rpc_request = postdata\n rpc_request[\"method\"] = \"aria2.tellStatus\"\n rpc_request[\"params\"] = [gid]\n response = requests.post(url=rpc_url,json=rpc_request)\n if response.status_code == 200:\n result = response.json().get(\"result\",\"\")\n if result != \"\":\n status = result.get(\"status\")\n if status != \"\":\n return status\n return None\n\n\"\"\"\n下载工具\nurl: 下载地址\ndir: 保存路径\nout: 保存名称\n\"\"\"\ndef download(url,dir,out):\n log(\"开始下载:{}\".format(url))\n gid = addDownloadTask(url,dir,out)\n status = download_status(gid)\n error = False\n error_num = 0\n while True and not error:\n if status == \"active\":\n time.sleep(3)\n print(\"下载中.....\\n\")\n status = download_status(gid)\n if status == \"complete\":\n break\n elif status == \"waiting\":\n log(\"下载队列已满\")\n time.sleep(4)\n status = download_status(gid)\n elif status == \"paused\":\n log(\"暂停下载\")\n break\n elif status == \"error\":\n log(\"下载错误\")\n if error_num == 3:\n error = True\n break\n else:\n log(\"重新下载\")\n gid = addDownloadTask(url,dir,out)\n status = download_status(gid)\n error_num = error_num + 1\n \n elif status == \"removed\":\n log(\"已经从下载队列中移除\")\n break\n if error:\n log(\"下载:{}出错\".format(url))\n return -1\n else:\n log(\"下载{}成功\".format(url))\n return 0\n\n# 获得首页内容\n# 返回页面的list\ndef get_index_info(url):\n result = []\n if True:\n response_data = htmlContent(url)\n soup = BeautifulSoup(response_data,'html.parser')\n plist = soup.select('#wrapper > div.container > div > div.col-xs-12.col-md-9.col-sm-8 > div.row > div > div > a')\n for item in plist:\n p_url = root_url + item.get(\"href\")\n result.append(p_url)\n return result\n\n\n# 下载图片\n# url 图片的地址\n# save_dir 保存的路径\n# 保存的名称\ndef download_image(url,save_dir,filename):\n download(url=url,dir=save_dir,out=filename)\n\n\"\"\"\n获得html文件的内容\n\"\"\"\ndef htmlContent(url):\n status = download(url=url,dir=script_tmp_path,out=script_tmp_name)\n data = None\n if status == 0:\n 
# 读取文件\n with open(script_tmp_path+\"\\\\\"+script_tmp_name,\"r\",encoding=\"utf-8\") as f:\n data = f.read()\n f.close()\n return data\n\n# 获得html中的图片的url\ndef get_image_url(url):\n imgurl = None\n try:\n if True:\n response_data = htmlContent(url)\n imgs = re.findall(images_pattern,response_data,re.S)\n # 启用备用解析\n if len(imgs) ==0 :\n bpattern = '
'\n imgs = re.findall(bpattern,response_data,re.S)\n # print(imgs)\n if len(imgs) > 0:\n # image_pattern = 'src=\"http://lf.veestyle.cn/uploads/.*?\"'\n urls = re.findall(image_pattern,imgs[0])\n if len(urls) > 0:\n imgurl = urls[0].split(\"src=\")[-1].replace('\\\"',\"\")\n print('图片url: {}'.format(imgurl))\n elif len(urls) == 0:\n # 启用备用解析\n urls = re.findall('src=\"http://lf.mz0731.com/uploads/.*?\"',imgs[0])\n imgurl = urls[0].split(\"src=\")[-1].replace('\\\"',\"\")\n # 替换成当前的地址\n imgurl = imgurl.replace(\"http://lf.mz0731.com/\",root_url)\n print(\"备用解析图片url:{}\".format(imgurl))\n\n except RuntimeError:\n log('请求:{} 异常'.format(url))\n else:\n return imgurl\n return imgurl\n\n# 获得一本漫画的名称和url\n# 第一个名称\ndef get_single_pic(url):\n url = url.replace(\"album\",\"photo\")\n if True:\n print(url)\n response_data = htmlContent(url)\n if response_data is None:\n log(\"!!!获取url:{}失败\".format(url))\n return\n soup = BeautifulSoup(response_data,'html.parser')\n titles = soup.select('#wrapper > div.top-nav.visible-xs > div > ul > span')\n if len(titles) == 0:\n log(\"!!!获取url:{}失败\".format(url))\n return\n title = titles[0].string\n title = title.replace(\":\",\"\")\n title = title.replace(\";\",\"\")\n title = title.replace(\"\\\\\",\"\").replace(\"/\",\"\").replace(\"*\",\"\").replace(\"?\",\"\").replace(\"\\\"\",\"\").replace(\"<\",\"\").replace(\">\",\"\").replace(\"|\",\"\")\n save_dir = base_save_dir + title + \"\\\\\"\n print(\"save dir:{}\".format(save_dir))\n # 创建该漫画的保存的文件夹\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n downloaded_list = []\n waiting_download_list = re.findall(\"https://cdn-msp.18comic.one/media/photos/.*?/.*?.jpg\",response_data,re.S)\n print(waiting_download_list)\n with open(\"imagelist.json\",\"r\") as f:\n downloaded_list = json.loads(f.read())\n f.close()\n for image_url in waiting_download_list:\n if image_url is None:\n continue\n if image_url in downloaded_list:\n print(\"已经下载:{}\".format(image_url))\n continue\n new_image_name 
= str(image_url).split(\"/\")[-1]\n download_image(image_url,save_dir,new_image_name)\n downloaded_list.append(image_url)\n time.sleep(3 + random.random())\n # 完成一个之后\n with open(\"imagelist.json\",\"w\") as f:\n f.write(json.dumps(downloaded_list))\n f.close()\n\nif __name__ == '__main__':\n # init()\n # 两个核心功能\n # 已经下载的列表\n downloaded_list = []\n with open('list.json','r') as f:\n json_data = f.read()\n f.close()\n downloaded_list = json.loads(json_data)\n\n # 1、根据主页获得漫画的列表\n totalPageNumber = 121\n for i in range(1, totalPageNumber):\n new_list_url = index_url + '&page={}'.format(i)\n print('列表url:{}'.format(new_list_url))\n url_list = get_index_info(new_list_url)\n print(url_list)\n if len(url_list) == 0:\n print(\"列表数据为空\")\n break\n for item in url_list:\n pic_url = item\n if pic_url in downloaded_list:\n print('{} 已经下载'.format(pic_url))\n continue\n else:\n # 未下载,下载\n print(\"未下载:{}\".format(pic_url))\n get_single_pic(pic_url)\n # 下载完成,写入文件\n downloaded_list.append(pic_url)\n #\n print('写入文件')\n with open('list.json','w') as f:\n f.write(json.dumps(downloaded_list))\n f.close()\n time.sleep(1+random.random())","sub_path":"Python/manhua/benzi18/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"135430695","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef index(request):\n # context = {'city': '北京'}\n # return render(request, 'index.html', context)\n return render(request, 'index.html', {'city': '北京1'}) # 也可以这样写\n\n\nfrom django.views import View\n\n\nclass HomeView(View):\n def get1(self, request):\n\n return render(request, 'index.html', {\n \"title\": '新的标题',\n 'tuple': (1, 2, 3, 4,),\n 'list1': ['a', 'b', 'c', ],\n 'dict1': {'name': 'alnk', 'age': 18, },\n 'num': 100,\n })\n\n\n def get2(self, request):\n\n return render(request, 'index2.html', {\n 'tuple': (1, 2, 3, 4,),\n 'list1': ['a', 'b', 'c', ],\n 'dict1': {'name': 'alnk', 'age': 18, },\n })\n\n def get3(self, request):\n\n return render(request, 'index3.html', {\n 'book_list': [\n {'name': 'python', 'price': 99},\n {'name': 'go', 'price': 69},\n {'name': 'go', 'price': 69},\n {'name': 'go', 'price': 69},\n {'name': 'go', 'price': 69},\n ]\n })\n\n # def get(self, request):\n # # forloop.parentloop.counter 会继承上一个for循环的计数规则\n # return render(request, 'index4.html', {\n # 'people': [\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # {'name':'alnk', 'age': 19, 'love': ['睡觉', '吃饭', '打豆豆']},\n # ]\n # })\n\n def get(self, request):\n \"\"\"过滤器\"\"\"\n from datetime import datetime\n return render(request, \"index5.html\", {\n \"title\": \"welcome to django\",\n 'title2': '大标题
',\n 'title3': \"小可爱\",\n 'date_time': datetime.now(),\n 'str1': 'welcome to django',\n 'str2': '我爱中国 welcome to django'\n })\n\n\n# class IndexView(View):\n# \"\"\"模板继承\"\"\"\n# def get(self, request):\n# return render(request, \"index/index.html\")\n#\n#\n# class List(View):\n# def get(self, request):\n# return render(request, \"index/list.html\")\n\n\nclass IndexView(View):\n \"\"\"模板继承\"\"\"\n def get(self, request):\n return render(request, \"exten/index.html\")\n\n\nclass List(View):\n def get(self, request):\n return render(request, \"exten/list.html\")\n\n\n\"\"\"表单系统\"\"\"\nfrom . forms import LoginForm\n\nclass LoginView(View):\n def get(self, request):\n return render(request, 'form.html', {\n 'forms': LoginForm(),\n })\n\n def post(self, request):\n # print(request.POST)\n # print(request.POST.get(\"user\"))\n # print(request.POST.get(\"pwd\"))\n # 提交表单\n # 使用表单系统提供的验证流程\n form = LoginForm(request.POST)\n if form.is_valid():\n print(form.cleaned_data)\n print('到数据库查询账号密码,进行对比')\n print(form)\n return HttpResponse('OK')\n else:\n print(form)\n # return HttpResponse('NO')\n return render(request, 'form.html', {\n 'forms': form\n })\n\n\n\nfrom .forms import UserForm\nclass FormModelView(View):\n def get(self, request):\n return render(request, 'form_model.html', {\n 'form_content': UserForm(),\n })\n\n def post(self, request):\n forms_mode = UserForm(request.POST)\n if forms_mode.is_valid():\n print(forms_mode.data)\n print('查询数据库账号密码')\n return HttpResponse('OK')\n else:\n print(forms_mode.cleaned_data)\n return render(request, 'form_model.html', {\n 'form_content': forms_mode,\n })","sub_path":"17 day17/03 作业day17/djangodemo/temp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"405431361","text":"\"\"\"\nGiven a set of distinct integers, nums,\nreturn all possible subsets.\nNote: The solution set must not contain duplicate subsets.\nFor example,\nIf nums = [1,2,3], a solution is:\n{\n (1, 2),\n (1, 3),\n (1,),\n (2,),\n (3,),\n (1, 2, 3),\n (),\n (2, 3)\n}\n\"\"\"\nfrom algorithms.bit import subsets\nnums = [1,2,3]\n\nprint(subsets(nums))\n\n\n### combination\n\nfrom itertools import combinations\n\nfor i in range(1,len(nums)+1):\n comb=combinations(nums,i)\n for j in comb:\n print(j)\n\n\"\"\"\nthis explanation is from leet_nik @ leetcode\nThis is an amazing solution. Learnt a lot.\nNumber of subsets for {1 , 2 , 3 } = 2^3 .\nwhy ?\ncase possible outcomes for the set of subsets\n 1 -> Take or dont take = 2\n 2 -> Take or dont take = 2\n 3 -> Take or dont take = 2\ntherefore,\ntotal = 2*2*2 = 2^3 = {{}, {1}, {2}, {3}, {1,2}, {1,3}, {2,3}, {1,2,3}}\nLets assign bits to each outcome ->\nFirst bit to 1 , Second bit to 2 and third bit to 3\nTake = 1\nDont take = 0\n0) 0 0 0 -> Dont take 3 , Dont take 2 , Dont take 1 = { }\n1) 0 0 1 -> Dont take 3 , Dont take 2 , take 1 = { 1 }\n2) 0 1 0 -> Dont take 3 , take 2 , Dont take 1 = { 2 }\n3) 0 1 1 -> Dont take 3 , take 2 , take 1 = { 1 , 2 }\n4) 1 0 0 -> take 3 , Dont take 2 , Dont take 1 = { 3 }\n5) 1 0 1 -> take 3 , Dont take 2 , take 1 = { 1 , 3 }\n6) 1 1 0 -> take 3 , take 2 , Dont take 1 = { 2 , 3 }\n7) 1 1 1 -> take 3 , take 2 , take 1 = { 1 , 2 , 3 }\nIn the above logic ,Insert S[i] only if (j>>i)&1 ==true\n{ j E { 0,1,2,3,4,5,6,7 } i = ith element in the input array }\nelement 1 is inserted only into those places where 1st bit of j is 1\nif( j >> 0 &1 ) ==> for above above eg.\nthis is true for sl.no.( j )= 1 , 3 , 5 , 7\nelement 2 is inserted only into those places where 2nd bit of j is 1\nif( j >> 1 &1 ) == for above above eg.\nthis is true for sl.no.( j ) = 2 , 3 , 6 , 7\nelement 3 is inserted only into those places where 3rd bit of j is 1\nif( j >> 2 & 1 ) == for above above 
eg.\nthis is true for sl.no.( j ) = 4 , 5 , 6 , 7\nTime complexity : O(n*2^n) , for every input element loop traverses\nthe whole solution set length i.e. 2^n\n\"\"\"\n\n","sub_path":"algorithms_practice/4.bit/14.Bit_subsets.py","file_name":"14.Bit_subsets.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"332641985","text":"from collections import Counter\n\ns = input()\nt = input()\n\nsc = Counter(s)\ntc = Counter(t)\n\nFLIP_CASE = ord('a') - ord('A')\n\nyay, whoops = 0, 0\nfor c in s:\n amount = min(sc[c], tc[c])\n if amount > 0:\n yay += amount\n tc[c] -= amount\n sc[c] -= amount\n\nfor c in sc:\n fc = chr(FLIP_CASE ^ ord(c))\n amount = min(sc[c], tc[fc])\n if amount > 0:\n whoops += amount\n tc[fc] -= amount\n\nprint(yay, whoops)\n","sub_path":"codeforces/518/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"479351735","text":"def ceasar_cipher(message, shift):\n the_alphabet = 'abcdefghijklmnopqrstuvwxyz'\n encrypted_message = ''\n for letter in message:\n letter_position = the_alphabet.find(letter.lower())\n index = (letter_position + shift) % len(the_alphabet)\n if letter.lower() not in the_alphabet:\n encrypted_message += letter\n elif letter.isupper():\n encrypted_message += the_alphabet[index].upper()\n else:\n encrypted_message += the_alphabet[index]\n return encrypted_message\n\nmessage = 'abc def hi Jkl mno pqr stu vwx Yz'\nprint(ceasar_cipher(message, -2))","sub_path":"ceasar_cipher.py","file_name":"ceasar_cipher.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"445066343","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nclass Author(models.Model):\n name = models.CharField(max_length=200)\n age = models.IntegerField()\n\n def __unicode__(self):\n return self.name\n\n\nclass Book(models.Model):\n\n title = models.CharField(max_length=200)\n outline = models.TextField(blank=True, null=True)\n users = models.ManyToManyField(User)\n authors = models.ManyToManyField(Author)\n\n def __unicode__(self):\n return self.title\n\n @staticmethod\n def get_book_list(user, page_no, num_per_page):\n books = Book.objects.filter(users__id=user.pk)\n pages = Paginator(books, per_page=num_per_page)\n try:\n return pages.page(page_no)\n except PageNotAnInteger:\n return pages.page(1)\n except EmptyPage:\n return pages.page(pages.num_pages)\n","sub_path":"oauth_dj/oauth_service/books/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"167560860","text":"# Unordered List Sequential Search\n\n\ndef seq_search(arr, ele):\n\n pos = 0\n found = False\n\n while pos < len(arr) and not found:\n\n if arr[pos] == ele:\n found = True\n print(ele)\n else:\n pos += 1\n\n return 'Element Found: ', found\n\n\narr = [1, 2, 3, 4, 5, 6]\n\nprint(seq_search(arr, 5))\n","sub_path":"Search and Sorting/sequential_search_unordered_list.py","file_name":"sequential_search_unordered_list.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"502559589","text":"#!/usr/bin/env python3\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport sys\nimport os\nfrom time import sleep\n\nTIMEOUT = 15\n\ndef message_input(driver):\n message_box = WebDriverWait(driver, TIMEOUT).until(EC.visibility_of_element_located( ( By.XPATH, '//*[@id=\"message\"]' ) ))\n message_box.send_keys(\"Hi Papaya!\\n\\nThis message was delivered by a script I wrote using Selenium, which you can find on my github at https://github.com/MooseandSquvirrel/hi_papaya - I used a virtual environment if you'd like to test it with pipenv, make sure to have chromedriver on your PATH.\\nMy name is Andy Gardner and I'm currently interning at 42 Silicon Valley, a non-profit coding school. I applied to your New Grad Software Engineer role for the Selenium script developer. I believe my experience writing Selenium scripts makes a good candidate for this role. So I thought I'd show it with a quick script I just wrote in a couple minutes. I like automating tasks and this roles seems like a great fit. 
\\n\\nHope to hear from you soon!\\n\\nBest,\\nAndy :)\")\n\ndef email_input(email, driver):\n email_box = WebDriverWait(driver, TIMEOUT).until(EC.visibility_of_element_located( ( By.XPATH, '//*[@id=\"contact-form\"]/div[1]/div[2]/div/input' ) ))\n email_box.send_keys(email)\n\ndef name_input(full_name, driver):\n\temail_box = WebDriverWait(driver, TIMEOUT).until(EC.visibility_of_element_located( ( By.XPATH, '//*[@id=\"contact-form\"]/div[1]/div[1]/div/input' ) ))\n\temail_box.send_keys(full_name)\n\ndef click_send(driver):\n\tbutton = WebDriverWait(driver, TIMEOUT).until(EC.visibility_of_element_located( ( By.XPATH, '//*[@id=\"contact-form\"]/button' ) ))\n\tbutton.click()\n\tsleep(20)\n\ndef input_info(full_name, email, driver):\n name_input(full_name, driver)\n email_input(email, driver)\n message_input(driver)\n click_send(driver)\n\ndef commandline():\n full_name_check = ''\n email_check = ''\n while full_name_check != 'y':\n full_name = input(\"Enter your full name: \")\n full_name_check = input(f\"You entered {full_name}, is this correct? (y or n): \")\n while email_check != 'y':\n email = input(\"Enter your email: \")\n email_check = input(f\"You entered {email}, is this correct? (y or n): \")\n return full_name, email\n \ndef main():\n full_name, email = commandline()\n driver = webdriver.Chrome(ChromeDriverManager().install())\n driver.get(\"https://papayapay.com/contact\")\n input_info(full_name, email, driver)\n\nif __name__== \"__main__\":\n main()","sub_path":"hi.py","file_name":"hi.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"323194496","text":"from pathlib import Path\nimport pandas as pd\nfrom warnings import warn\n\nroot_path = Path(\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/04_complete\")\nout_path = Path(\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/aggregated_data\")\nout_path.mkdir(exist_ok=True)\n\nfiles = sorted(root_path.glob(\"*quest*.xlsx\"))\ndf_out = pd.DataFrame()\n\n\ndef excel_letter_to_num(l):\n from string import ascii_lowercase\n letter_lut = {letter: index for index, letter in enumerate(ascii_lowercase, start=0)}\n return letter_lut[l.lower()]\n\n\ndef extract_data_via_mapping(file, lut27, lut29, sheet=\"01_Veränderungsfragebogen\", row_offset=-2):\n df_out = pd.DataFrame()\n\n # encoding breaks load code\n sheet_ = 4 if sheet == \"03_Kardivaskulär\" else sheet\n\n df_in = pd.read_excel(file, sheet_name=sheet_)\n\n # cardio comes in different formats 29 and 27 lines\n # (this is because some files dont have the Keine der genannten Behandlungen cells\n\n if len(df_in) == 29:\n lut = lut29.dropna(axis=\"index\", how=\"all\")\n elif len(df_in) == 27:\n lut = lut27.dropna(axis=\"index\", how=\"all\")\n else:\n raise Exception(file, len(df_in))\n\n for _, row in lut.iterrows():\n name, col_idx, row_idx = row[\"variable_short_engl\"], excel_letter_to_num(row[\"value_col\"]), \\\n int(row[\"value_row\"]) + row_offset\n df_out = df_out.append(pd.DataFrame({\"variable\": name, \"value\": df_in.iloc[row_idx, col_idx]}, index=[0]))\n df_out = df_out.set_index(\"variable\").T\n\n return df_out\n\n\nsheet = \"03_Kardivaskulär\"\n\nlut_file = Path(\n f\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/mapping/mapping_{sheet}_27.xlsx\")\nlut_27 = pd.read_excel(lut_file)\nlut_file = Path(\n f\"/Volumes/lhab_public/03_Data/04_data_questionnaires/00_rawdata_tp6/mapping/mapping_{sheet}_29.xlsx\")\nlut_29 = pd.read_excel(lut_file)\n\ndfs = []\nfor f in files:\n id = pd.read_excel(f, sheet_name=\"ID\", 
usecols=\"A:B\", names=[\"variable\", \"value\"], header=None)\n id.dropna(axis=\"index\", how=\"all\", inplace=True)\n id = id.set_index(\"variable\").T\n id[\"file\"] = f\n\n df1 = extract_data_via_mapping(f, lut_27, lut_29, sheet=sheet)\n df = pd.concat((id, df1), axis=1)\n dfs.append(df)\n\ndf_out = pd.concat(dfs, axis=0, sort=False)\ndf_out.to_excel(out_path / f\"00_aggregated_{sheet}.xlsx\", index=False)\n","sub_path":"scripts/aggregate_tp6/aggregate_quest_cardio_tp6.py","file_name":"aggregate_quest_cardio_tp6.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"594234735","text":"import pytest\nimport sys\nimport random\nimport string\nimport json\n\nfrom app import create_app\n\ndef random_string_generator():\n allowed_chars = string.ascii_letters + string.punctuation\n size = 12\n return ''.join(random.choice(allowed_chars) for x in range(size))\n\nusername = random_string_generator()\npassword = random_string_generator()\n\nprint(\"Testing with \", username, \"as Admin\")\n\n@pytest.fixture\ndef client():\n app = create_app(\"test_config\")\n app.config['TESTING'] = True\n\n with app.test_client() as client:\n yield client\n\ndef test_register_admin(client):\n \"\"\"Make sure register works.\"\"\"\n\n res = register_as_admin(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"User registered.\\\"}\" in res\n\ndef test_login_admin(client):\n \"\"\"Make sure login works.\"\"\"\n\n res = login(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Logged in\\\"}\" in res\n\ndef test_read_admin(client):\n \"'Make sure crud/read works'\"\n login(client)\n res = read(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"crud/read msg\\\"}\" in res\n\ndef test_create_update_delete_admin(client):\n \"'Make sure crud/create works'\"\n res = create(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"New movie added.\\\"}\" in res\n \n res_json = json.loads(res.decode(\"utf-8\"))\n movie_id = res_json[0].get(\"id\")\n \n res = update(client, movie_id)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Changes Saved.\\\"}\" in res\n\n res = delete(client, movie_id)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Deleted.\\\"}\" in res\n\ndef test_search_admin(client):\n \"'Make sure crud/delete works'\"\n res = create(client)\n res_json = json.loads(res.decode(\"utf-8\"))\n movie_id = res_json[0].get(\"id\")\n \n res = search(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Search results returned 
successfully.\\\"}\" in res\n\n res = delete(client, movie_id)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Deleted.\\\"}\" in res\n\ndef test_logout_admin(client):\n \"'Make sure user/logout works'\"\n res = logout(client)\n assert b\"{\\\"status\\\": \\\"success\\\", \\\"msg\\\": \\\"Logged Out\\\"}\" in res\n\ndef logout(client):\n login(client)\n return client.delete('user/logout').data\n\ndef search(client):\n login(client)\n return client.get('search/movies?name=movie&director=director').data\n\ndef delete(client, movie_id):\n login(client)\n return client.delete('crud/delete/' + str(movie_id)).data\n\n\ndef update(client, movie_id):\n login(client)\n return client.patch('crud/update/' + str(movie_id), json={\n \"name\" : \"movie\",\n \"director\" : \"director\",\n \"99popularity\": 50.0, \n \"genre\": [\"Action\"],\n \"imdb_score\": 5.0\n }).data\n\ndef create(client):\n login(client)\n return client.post('crud/create', json={\n \"name\" : \"movie\",\n \"director\" : \"director\",\n \"99popularity\": 50.0, \n \"genre\": [\"Action\"],\n \"imdb_score\": 5.0\n }).data\n\n\ndef read(client):\n return client.get(\"crud/read\").data\n\ndef register_as_admin(client):\n return client.post('user/register', json={\n \"user_name\" : username,\n \"password\" : password,\n \"user_role\" : \"admin\"\n }).data\n\ndef login(client):\n return client.post('user/login', json={\n \"user_name\" : username,\n \"password\" : password\n }).data\n\n\n ","sub_path":"tests/test_as_admin.py","file_name":"test_as_admin.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"422087527","text":"# Definition for a binary tree node.\r\nclass TreeNode(object):\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = None\r\n\r\n#class Solution:\r\n# def levelOrderBottom(self, root):\r\n# \"\"\"\r\n# :type root: TreeNode\r\n# :rtype: List[List[int]]\r\n# \"\"\"\r\n# # BFS+stack\r\n# leverTraversal=[]\r\n# if not root:\r\n# return leverTraversal\r\n# currLevel=[root]\r\n# while currLevel:\r\n# nextLevel=[] # use two containers, then swap curr and next\r\n# val=[]\r\n# for node in currLevel:\r\n# val.append(node.val)\r\n# if node.left:\r\n# nextLevel.append(node.left)\r\n# if node.right:\r\n# nextLevel.append(node.right)\r\n# currLevel=nextLevel\r\n# leverTraversal.append(val)\r\n# return leverTraversal[::-1]\r\n \r\n#class Solution:\r\n# def levelOrderBottom(self, root):\r\n# \"\"\"\r\n# :type root: TreeNode\r\n# :rtype: List[List[int]]\r\n# \"\"\"\r\n# # DFS+stack \r\n# levelTraversal=[]\r\n# self.preorder(root,0,levelTraversal)\r\n# levelTraversal.reverse() # bottom-up level order\r\n# return levelTraversal\r\n# \r\n# def preorder(self,root,level,res):\r\n# if root:\r\n# if len(res) {url['url']}: {url['time']}\"\n return {\"url\": url, \"rep\": rep, \"_id\": url_id}\n\n def edit(self, object_id):\n doc = self.get_document(object_id)\n print(doc['rep'])\n edit = input(\"Change alias or url: [a/u]: \")\n if edit.upper() == \"A\":\n new_alias = input(\"New alias: \")\n self.database.urls.find_one_and_update(\n doc['_id'],\n {\"$set\": {\"alias\": new_alias}}\n )\n print(new_alias)\n elif edit.upper() == \"U\":\n new_url = input(\"New url: \")\n new_url = self.database.valid_url(new_url)\n self.database.urls.find_one_and_update(\n doc['_id'],\n {\"$set\": {\"url\": new_url}}\n )\n print(new_url)\n\n def delete_document(self, object_id):\n doc = self.get_document(object_id)\n delete = input(f\"{doc['rep']}\\nDelete the document [Y/n]: \")\n if delete.upper() == \"Y\":\n 
print(self.database.urls.delete_one(doc['_id']))\n\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-e\", help=\"Edit with object id\")\n parser.add_argument(\"-d\", help=\"Delete delete document by id\")\n parser.add_argument(\"-a\", help=\"All\", action='store_true')\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n editer = Editer()\n command = parse()\n if command.e is not None:\n editer.edit(command.e)\n if command.d is not None:\n editer.delete_document(command.d)\n if command.a:\n editer.read_all()\n","sub_path":"editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"269411203","text":"# https://www.acmicpc.net/problem/1517\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\narray = list(map(int, input().split()))\nswap = 0\n\ndef mergeSort(array, s, e):\n\tglobal swap\n\tif s + 1 < e :\n\t\tmid = (s+e)//2\n\t\tsortedLeftArray = mergeSort(array,s,mid)\n\t\tsortedRightArray= mergeSort(array,mid,e)\n\t\ti = 0\n\t\tj = 0\n\t\tnewArray = [0]*(e-s)\n\t\tfor k in range(e-s):\n\t\t\tif i < mid-s and (j == e-mid or sortedLeftArray[i] <= sortedRightArray[j]):\n\t\t\t\tnewArray[k] = sortedLeftArray[i]\n\t\t\t\ti += 1\n\t\t\telse :\n\t\t\t\tnewArray[k] = sortedRightArray[j]\n\t\t\t\tif k < mid+j :\n\t\t\t\t\tswap += mid+j-k-s\n\t\t\t\tj += 1\n\telse :\n\t\tnewArray = array[s:e]\n\treturn newArray\nmergeSort(array,0,n)\nprint(swap)","sub_path":"dojinyou/code_3week/13_1517.py","file_name":"13_1517.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"568666828","text":"Import('_default_env')\n\n\nfiles = Split(\"\"\"\n ../../../Properties/AssemblyInfo.cs\n CrashLogDumper.cs\n Invoker.cs\n Option.cs\n OptionDialog.cs\n StackStatusHandler.cs\n\"\"\")\n\nlib = _default_env.CliLibrary('OssToolkitIos', files, CLILIBS=['mscorlib', 'OssCore', 'monotouch'])\n\nAlias('Lib', lib)\n\ndocs = _default_env.Doxygen('$hardware_dir/share/Docs/Tar/ToolkitIos.tar', files, DOXYGENINPUT='Linn/Toolkit/Ios', DOXYCLEANOUTPUTDIR='$hardware_dir/share/Docs/ToolkitIos', DOXYGENNAMESPACE='OssToolkitIos', DOXYGENEXCLUDE='*.svn*')\nAlias('Docs', docs)\n\n","sub_path":"LibUpnpCil/Toolkit/Linn/Toolkit/Ios/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"367403945","text":"from imageai.Detection import ObjectDetection\nimport os\n\nexecution_path = os.getcwd()\n\n# Create Detection Object\ndetector = ObjectDetection()\ndetector.setModelTypeAsRetinaNet()\ndetector.setModelPath( os.path.join(execution_path , \"resnet50_coco_best_v2.0.1.h5\"))\ndetector.loadModel()\n\n# Name of Target\nimage_name = 'cam_view.jpg'\nimage_target = 'processed_' + image_name\n\n#Run Detection\ndetections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , image_name), output_image_path=os.path.join(execution_path , image_target))\n\n# Creates local images of objects found\n# detections, extracted_images = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , \"image.jpg\"), output_image_path=os.path.join(execution_path , \"imagenew.jpg\"), extract_detected_objects=True)\n\n# Console output\nfor eachObject in detections:\n print(eachObject[\"name\"] , \" : \" , eachObject[\"percentage_probability\"] )","sub_path":"src/first_detection.py","file_name":"first_detection.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"391705312","text":"'''\nCreated on 18 Jun 2019\n\n@author: Kamil\n'''\n'''If you finished the previous project which compared the karma of two new comments, hopefully you learned a thing or two about receiving data from Reddit's API. Now you're going to take this a step further, and even have the opportunity to make a basic twitter bot.\n\n Create a program that receives data from the /r/todayilearned subreddit, and looks for new facts that have been posted.\n Each time the program comes across a new fact, the fact should be printed into the command line. However, phrases like \"TIL \", \"TIL that\", etc should be removed so the only thing that is printed is the fact.\n\nNew TIL API data here\n\nThere are a couple things to note about this since you'll more than likely be using a loop to check for new posts. According to Reddit's API Access Rules Page, the API pages are only updated once every thirty seconds, so you'll have to have your code pause for at least thirty seconds before it tries to find more posts. Secondly, if for some reason you decide to try to get data sooner than every thirty seconds, make sure to not send more than thirty requests per minute. That is the maximum you are allowed to do.\n\nThere is actually a lot you can do once your program starts receiving facts. Instead of simply printing the facts, here are some ideas for what you can do with them. If you currently do not feel like you can accomplish these ideas, feel free to come back later when you have more experience.\n\n Print the link to the source of the fact too.\n Try to further clean up the fact by adding punctuation to the end if it is missing, capitalize the first word, etc.\n Write the facts to a separate text file so you end up with a giant compilation of random facts.\n Create a bot that posts the facts to twitter. 
This may sound hard, but it's actually pretty simple by using the Python Twitter Tools module and following the guide posted here.\n Remember, the maximum amount of characters you can use in a tweet is only 280, so you'll have to filter out facts that are longer than that.\n By now you should be pretty familiar with python, so if you get ideas for improving your program, go for it!\n'''\n'''\nMY BOT:\nhttps://twitter.com/TodayILearnedb1\n'''\nimport requests\nimport json\nimport twitter\n\nclass TodayILearned():\n def __init__(self):\n self.link = 'https://www.reddit.com/r/todayilearned/new/.json'\n \n def get_info(self):\n try:\n r = requests.get(self.link, headers = {'User-agent': 'your bot 0.1'})\n r.raise_for_status()\n except requests.exceptions.HTTPError as error:\n print(f'There is problem:\\n{error}')\n return False\n new_til = json.loads(r.content)\n new_til = new_til[\"data\"][\"children\"][0]['data']['title']\n new_til = new_til.replace('TIL', '').replace('Til', '').strip()\n for _ in range(len(new_til) - 1):\n if new_til[0].isalpha() == False:\n new_til = new_til.replace(new_til[0], '').strip()#.capitalize()\n else:\n break\n new_til = new_til.split(' ', 1)\n if new_til[0].lower() == 'this' or new_til[0].lower() == 'that' or new_til[0].lower() == 'about' or new_til[0].lower() == 'of':\n new_til.pop(0)\n new_til = ' '.join(new_til)\n new_til = new_til[:1].upper() + new_til[1:]\n if new_til[-1].isalnum() == True:\n new_til += '.'\n return new_til if len(new_til) < 280 else False #change for 140 when twitter working \n \n def save_new_dict(self, new_dict):\n with open('til_news_base.json', 'w') as json_file:\n json.dump(new_dict, json_file, indent=2)\n \n def read_json_file(self):\n with open('til_news_base.json') as json_file:\n data = json.load(json_file)\n self.last_key = int(sorted(list(data.keys()))[-1])\n return data\n \n def post_on_twitter(self, new_post):\n TOKEN = '1141700351290224640-h5liK9wfQfOOizRN5RuIXyEgeJl4gc'\n TOKEN_KEY = 
'ElS5g6TLLIOokBOJVPLYjeEOrziwhvqoDOTLT45e1vemx'\n CON_SEC = 'p8xbnj07lHkyqjw8lxJ9XFI0T'\n CON_SEC_KEY = 'h6TA0XxuIMNm6XXUgkUChEOYrnPhSaAUhagmvPWp7cwTA6XgaP'\n my_auth = twitter.OAuth(TOKEN,TOKEN_KEY,CON_SEC,CON_SEC_KEY)\n twit = twitter.Twitter(auth=my_auth)\n twit.statuses.update(status=new_post)\n \n def program(self):\n #first load the base from file\n dict_with_news = self.read_json_file()\n #second get new posts from reddit\n new_info = self.get_info()\n #check if new post in base or if is it last post\n if new_info != False:\n if new_info != dict_with_news[str(self.last_key)]:\n dict_with_news[str(self.last_key + 1)] = new_info\n print(new_info)\n #add to base if not\n self.save_new_dict(dict_with_news)\n #print new TIL on twitter\n try:\n self.post_on_twitter(new_info)\n except:\n print(\"There was a problem with adding news to twitter.\")\n \ndef program():\n class_til = TodayILearned()\n class_til.program()\n \nif __name__ == \"__main__\":\n program()\n","sub_path":"watch_for_new_TIL_facts/watch_for_new_TIL_facts.py","file_name":"watch_for_new_TIL_facts.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"337586133","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# =============================================================================\n# Version: 0.1 (September 14, 2013)\n# Author: Riccardo Angius (riccardo.angius@me.com)\n#\n# =============================================================================\n# Copyright (c) 2013. Riccardo Angius (riccardo.angius@me.com)\n# =============================================================================\n# This file is part of Pairses: A PAttern Induced RDF Statement Extraction System.\n#\n# This is fairly beta software. Please contact the author before usage.\n# =============================================================================\nimport jsonrpclib\nfrom json import loads, dumps\nimport os, hashlib\nfrom pickling import pickleDump, pickleLoad\nfrom configuration import * \nfrom classes import *\nimport unicodedata\ncachePath = cfg['snlpcachepath']\n\nclass StanfordCoreNLP():\n\tserver = jsonrpclib.Server(\"http://localhost:8080\")\n\n\tdef parse(self, text, useCache=True):\n\t\n\t\t# Fixes some idiosyncrasies due to wiki markup conversion and text input in Wikipedia\n\t\ttext = unicode(unicodedata.normalize('NFKD', text))\n\t\t\n\t\tif text.lower() == 'q' or text.lower() == 'eof':\n\t\t\t\"These strings will terminate the SCNLP tools, which we don't want\"\n\t\t\traise InvalidSentence()\n\t\t\n\t\tif len(text) >= 1000 or text.count(',') > 30:\n\t\t\t\"\"\"\tA bug in pexpect produces \\x07 chars and messes up\n\t\t\t\twhen input is >= 1024 chars (apparently on OS X only)\n\t\t\t\tJust to be on safe side, we'll ignore sentences with more\n\t\t\t \tthan 999 chars, as they are mostly long lists anyway.\n\t\t\t\"\"\"\n\t\t\traise InvalidSentence()\n\t\t\n\t\ttextHash = hashlib.sha224(text.encode(\"ascii\",\"replace\")).hexdigest()\n\t\t\n\t\tfilename = textHash + '.snlpcache'\n\t\tpath = os.path.join(cachePath, filename)\n\n\t\tif useCache and os.path.exists(path):\n\t\t\tresults = 
pickleLoad(path)\n\t\telse:\n\t\t\tparsed = self.server.parse(text)\n\t\t\tresults = loads(parsed)\n\n\t\t\tpickleDump(results, path)\n\t\t\t\n\t\treturn results","sub_path":"pairseslib/stanfordcorenlp.py","file_name":"stanfordcorenlp.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"644457222","text":"## Ex 1 - 3\r\nimport matplotlib.pyplot as plt\r\ndef eulerListe(h,L,R,U):\r\n t = [0]\r\n i = [0]\r\n k = 0\r\n while i[k]<=0.95*(U/R):\r\n t.append(t[k]+h)\r\n i.append(i[k] +h*(U/L - (R/L)*i[k]))\r\n k+=1\r\n t.append(t[k]+h)\r\n i.append(i[k] +h*(U/L - (R/L)*i[k])) \r\n return (t,i)\r\n\r\nt,i = eulerListe(0.0001,1,100,1)\r\nplt.plot(t,i)\r\nplt.show()\r\n\r\n## Ex 4\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\ndef eulerTableau(n,h,L,R,U):\r\n t = np.zeros(n+1)\r\n i = np.zeros(n+1)\r\n for k in range(1,n+1):\r\n t[k] = t[k-1] + h\r\n i[k] = i[k-1] + h*(U/L - (R/L)*i[k-1])\r\n return (i,t)\r\n \r\ni,t = eulerTableau(1000,0.0001,1,100,1)\r\nplt.plot(t,i)\r\nplt.show()\r\n## Ex 5\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef eulerTableau(n,h,L,R,U):\r\n t = np.zeros(n+1)\r\n i = np.zeros(n+1)\r\n for k in range(1,n+1):\r\n t[k] = t[k-1] + h\r\n i[k] = i[k-1] + h*(U/L - (R/L)*i[k-1])\r\n return (i,t)\r\nU = 1\r\nR = 100\r\nL = 1\r\nT = 0.1\r\nabs = np.linspace(0,0.1)\r\nord = [(U/R)*(1-np.exp(-(R/L)*k)) for k in abs]\r\nplt.plot(abs,ord)\r\n\r\ndef tableVar(periode,p,U,R,L):\r\n pts = int(periode//p)\r\n i,t = eulerTableau(pts,p,L,R,U)\r\n return (i,t)\r\ni,t = tableVar(T,0.0001,U,R,L)\r\nplt.plot(t,i)\r\ni,t = tableVar(T,0.001,U,R,L)\r\nplt.plot(t,i)\r\ni,t = tableVar(T,0.0005,U,R,L)\r\nplt.plot(t,i)\r\ni,t = tableVar(T,0.00001,U,R,L)\r\nplt.plot(t,i)\r\nplt.legend([\"Real\",\"p=0.0001\",\"p=0.001\",\"p=0.0005\",\"p=0.00001\"])\r\nplt.show()\r\n\"\"\"\r\nLes valeurs valides semblent celles inférieures à 0.001ms\r\n\"\"\"\r\n\r\n## Ex 6\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndef eulerMalin(L,R,U):\r\n tau = L/R\r\n periode = 3*tau\r\n n = 1000\r\n h = periode/n\r\n \r\n t = np.zeros(n+1)\r\n i = np.zeros(n+1)\r\n for k in range(1,n+1):\r\n t[k] = t[k-1] + h\r\n i[k] = i[k-1] + h*(U/L - (R/L)*i[k-1])\r\n return (i,t)\r\ni,t = eulerMalin(1,100,1)\r\nplt.plot(t,i)\r\n\r\nabs = 
np.linspace(0,0.1)\r\nord = [(U/R)*(1-np.exp(-(R/L)*k)) for k in abs]\r\nplt.plot(abs,ord)\r\nplt.legend([\"Euler Malin\",\"Real\"])\r\nplt.show()","sub_path":"TP 8 - Schéma d'Euler/ex 2.py","file_name":"ex 2.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"365178011","text":"'''\nClipboard windows: an implementation of the Clipboard using ctypes.\n'''\n\n__all__ = ('ClipboardWindows', )\n\nfrom kivy.utils import platform\nfrom kivy.core.clipboard import ClipboardBase\n\nif platform != 'win':\n raise SystemError('unsupported platform for Windows clipboard')\n\nimport ctypes\nfrom ctypes import wintypes\nuser32 = ctypes.windll.user32\nkernel32 = ctypes.windll.kernel32\nmsvcrt = ctypes.cdll.msvcrt\nc_char_p = ctypes.c_char_p\nc_wchar_p = ctypes.c_wchar_p\n\n\nclass ClipboardWindows(ClipboardBase):\n\n def get(self, mimetype='text/plain'):\n GetClipboardData = user32.GetClipboardData\n GetClipboardData.argtypes = [wintypes.UINT]\n GetClipboardData.restype = wintypes.HANDLE\n\n user32.OpenClipboard(user32.GetActiveWindow())\n # Standard Clipboard Format \"1\" is \"CF_TEXT\"\n pcontents = GetClipboardData(13)\n\n # if someone pastes a FILE, the content is None for SCF 13\n # and the clipboard is locked if not closed properly\n if not pcontents:\n user32.CloseClipboard()\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n return data\n\n def put(self, text, mimetype='text/plain'):\n text = text.decode(self._encoding) # auto converted later\n text += u'\\x00'\n\n SetClipboardData = user32.SetClipboardData\n SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]\n SetClipboardData.restype = wintypes.HANDLE\n\n GlobalAlloc = kernel32.GlobalAlloc\n GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]\n GlobalAlloc.restype = wintypes.HGLOBAL\n\n CF_UNICODETEXT = 13\n\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n\n # ignore null character for strSource pointer\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text[:-1]))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n\n def get_types(self):\n return 
['text/plain']\n","sub_path":"kivy/core/clipboard/clipboard_winctypes.py","file_name":"clipboard_winctypes.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"314297454","text":"import os\nimport sys\nimport traceback\n\nimport gevent\nimport pytest\n\nfrom rotkehlchen.db.dbhandler import DBHandler\nfrom rotkehlchen.externalapis.etherscan import Etherscan\nfrom rotkehlchen.typing import ExternalService, ExternalServiceApiCredentials\n\n\n@pytest.fixture(scope='function')\ndef temp_etherscan(function_scope_messages_aggregator, tmpdir_factory):\n api_key = os.environ.get('ETHERSCAN_API_KEY', None)\n if not api_key:\n pytest.fail('No ETHERSCAN_API_KEY environment variable found.')\n directory = tmpdir_factory.mktemp('data')\n db = DBHandler(\n user_data_dir=directory,\n password='123',\n msg_aggregator=function_scope_messages_aggregator,\n )\n db.add_external_service_credentials(credentials=[\n ExternalServiceApiCredentials(service=ExternalService.ETHERSCAN, api_key=api_key),\n ])\n etherscan = Etherscan(database=db, msg_aggregator=function_scope_messages_aggregator)\n return etherscan\n\n\ndef _handle_killed_greenlets(greenlet: gevent.Greenlet) -> None:\n\n tb = ''.join(traceback.format_tb(greenlet.exc_info[2]))\n message = ('Greenlet died with exception: {}.\\n'\n 'Exception Name: {}\\nException Info: {}\\nTraceback:\\n {}'\n .format(\n greenlet.exception,\n greenlet.exc_info[0],\n greenlet.exc_info[1],\n tb,\n ))\n\n print(message)\n sys.exit(1)\n\n\n@pytest.mark.skipif(\n 'CI' in os.environ,\n reason='no real etherscan tests in Travis yet due to API key',\n)\ndef test_maximum_rate_limit_reached(temp_etherscan):\n \"\"\"\n Test that we can handle etherscan's rate limit repsponse properly\n\n Regression test for https://github.com/rotki/rotki/issues/772\"\n \"\"\"\n etherscan = temp_etherscan\n\n # Spam with concurrent requests for a bit. 
This triggers the problem\n count = 200\n while count > 0:\n greenlet = gevent.spawn(\n etherscan.get_account_balance,\n '0x25a63509FEF5D23FF226eb8004A3c1458D6F3AB8')\n greenlet.link_exception(_handle_killed_greenlets)\n greenlet = gevent.spawn(\n etherscan.eth_call,\n '0x4678f0a6958e4D2Bc4F1BAF7Bc52E8F3564f3fE4',\n '0xc455279100000000000000000000000027a2eaaa8bebea8d23db486fb49627c165baacb5',\n )\n greenlet.link_exception(_handle_killed_greenlets)\n gevent.sleep(0.001)\n count -= 1\n","sub_path":"rotkehlchen/tests/external_apis/test_etherscan.py","file_name":"test_etherscan.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"592178178","text":"# def solution(lis,a,b,stack,check):\n# addlis = [[a+1,b],[a-1,b],[a,b+1],[a,b-1]]\n# for i in addlis:\n# if 0 <= i[0] < len(lis) and 0<= i[1] < len(lis):\n# if lis[i[0]][i[1]] != 1 and stack.count(i) == 0:\n# stack.append(i)\n# check.append(lis[i[0]][i[1]])\n# solution(lis,i[0],i[1],stack,check)\n\n# T = int(input())\n# for t in range(1,T+1):\n# N = int(input())\n# lis = []\n# for a in range(N):\n# lis.append(list(map(int, input().strip())))\n \n# stack = []\n# check = []\n# for a in range(0,len(lis)):\n# for b in range(0,len(lis)):\n# if lis[a][b] == 2:\n# stack.append([a,b])\n# check.append(lis[a][b])\n# break\n# solution(lis,stack[-1][0],stack[-1][1],stack,check)\n# if check.count(3) == 1:\n# print(\"{} {}\".format(t,1))\n# else:\n# print(\"{} {}\".format(t,0))\n \ndef dfs(lis,stack):\n while True:\n a, b = stack.pop(-1)\n addlis = [[a+1,b],[a-1,b],[a,b+1],[a,b-1]]\n for i in addlis:\n if 0 <= i[0] < len(lis) and 0<= i[1] < len(lis):\n if lis[i[0]][i[1]] == 0:\n lis[i[0]][i[1]] = 4\n stack.append(i)\n elif lis[i[0]][i[1]] == 3:\n return 1\n if len(stack) == 0:\n break\n return 0\n\nT = int(input())\nfor t in range(1,T+1):\n N = int(input())\n lis = []\n for a in range(N):\n lis.append(list(map(int, input().strip())))\n stack = []\n for j in range(0,len(lis)):\n for k in range(0,len(lis)):\n if lis[j][k] == 2:\n stack.append([j,k])\n print(\"#{} {}\".format(t,dfs(lis,stack)))\n\n \n","sub_path":"KYC/algorithm/Stack/StackPractice6.py","file_name":"StackPractice6.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"71064167","text":"from pyspark.sql.types import (\n FloatType,\n StringType,\n StructType,\n StructField,\n)\n\nimport pandas as pd\nimport databricks.koalas as ks\nimport os\nimport contextlib\nfrom metrics import compute_score\n\nimport h2o\nfrom h2o.estimators import H2OXGBoostEstimator\n\nfrom model.interface import ModelInterface\n\n# WARNING: this class only works with datasets that fit in memory, as it casts Spark\n# dataframes to Pandas dataframes (H2O natively does not work with Spark).\n# Make sure you have enough memory on your machine.\n\n# Tested with the following features enabled:\n# \"engaged_with_user_follower_count\"\n# \"engaged_with_user_following_count\"\n# \"engaging_user_follower_count\"\n# \"engaging_user_following_count\"\n# all targets\n\nfrom pathlib import Path\nfrom constants import ROOT_DIR\n\n\nclass Model(ModelInterface):\n def __init__(self, include_targets=True, seed=None):\n with open(os.devnull, \"w\") as devnull:\n with contextlib.redirect_stdout(devnull):\n h2o.init()\n h2o.no_progress()\n\n is_xgboost_available = H2OXGBoostEstimator.available()\n\n if not is_xgboost_available:\n raise RuntimeError(\"H2OXGBoostEstimator is not available!\")\n\n self.model = None\n self.seed = seed\n\n # Specify default and custom features to use in the model\n self.enabled_features = [\n \"engaged_with_user_follower_count\",\n \"engaged_with_user_following_count\",\n \"engaging_user_follower_count\",\n \"engaging_user_following_count\",\n ]\n\n self.labels = [\"reply\", \"retweet\", \"retweet_with_comment\", \"like\"]\n\n # Specify extractors and auxiliaries required by the enabled features\n self.enabled_auxiliaries = []\n self.enabled_extractors = [\n \"engaged_with_user_follower_count\",\n \"engaged_with_user_following_count\",\n \"engaging_user_follower_count\",\n \"engaging_user_following_count\",\n \"binarize_timestamps\",\n ]\n if include_targets:\n self.enabled_extractors.append(\"binarize_timestamps\")\n\n @staticmethod\n 
def serialized_model_path_for_target(target: str) -> str:\n p = (\n Path(ROOT_DIR)\n / \"../serialized_models\"\n / f\"h2o_xgboost_baseline_{target}.model\"\n )\n return str(p.resolve())\n\n def fit(self, train_data, _valid_data, _hyperparams):\n \"\"\"Fit model to given training data and validate it.\n Returns the best model found in validation.\"\"\"\n\n # Cast to h2o frames\n train_frame = h2o.H2OFrame(train_data.to_pandas())\n\n # TODO: hyperparameter tuning; unbalancement handling?\n\n models = dict()\n for label in self.labels:\n ignored = set(self.labels) - set(label)\n model = H2OXGBoostEstimator(seed=self.seed)\n model.train(\n y=label,\n ignored_columns=list(ignored),\n training_frame=train_frame\n )\n model.save_mojo(self.serialized_model_path_for_target(label))\n models[label] = model\n\n # Save (best on valid) trained model\n self.model = models\n\n return models\n\n def predict(self, test_data):\n \"\"\"Predict test data. Returns predictions.\"\"\"\n schema = StructType(\n [\n StructField(\"reply\", FloatType(), False),\n StructField(\"retweet\", FloatType(), False),\n StructField(\"retweet_with_comment\", FloatType(), False),\n StructField(\"like\", FloatType(), False),\n StructField(\"tweet_id\", StringType(), False),\n StructField(\"engaging_user_id\", StringType(), False),\n ]\n )\n\n # DataFrame.to_pandas() drops the index, so we need to save it\n # separately and reattach it later.\n\n # H2OFrame does not provide an index like pandas, but rather appears\n # to have an internal numerical index to preserve ordering.\n # https://docs.h2o.ai/h2o/latest-stable/h2o-py/docs/frame.html#h2oframe\n\n # So we trust that H2O keeps everything in order and drop our custom\n # index [\"tweet_id\", \"engaging_user_id\"] in favour of a \"standard\"\n # numerical index such as 0, 1, 2, ..., only to reattach it later\n # when returning the predictions DataFrame.\n\n ks_test_data_index = test_data.reset_index(drop=False)\n ks_index = 
ks_test_data_index[[\"tweet_id\", \"engaging_user_id\"]]\n\n h2oframe_test = h2o.H2OFrame(test_data.to_pandas())\n\n df_predictions = pd.DataFrame()\n for label in self.labels:\n df_predictions[label] = (\n self.model[label].predict(h2oframe_test).as_data_frame()[\"True\"].values\n )\n\n # Reattach real index (Lord have mercy)\n df_predictions = df_predictions.join(ks_index.to_pandas())\n ks_predictions = ks.DataFrame(df_predictions)\n\n return ks_predictions.to_spark()\n\n def load_pretrained(self):\n self.model = {}\n for label in self.labels:\n # Select the first model in the directory\n p = str(\n next(\n Path(self.serialized_model_path_for_target(label)).iterdir()\n ).resolve()\n )\n with open(os.devnull, \"w\") as devnull:\n with contextlib.redirect_stdout(devnull):\n self.model[label] = h2o.import_mojo(p)\n\n def save_to_logs(self, metrics):\n \"\"\"Save the results of the latest test performed to logs.\"\"\"\n pass\n","sub_path":"src/model/h2o_xgboost_baseline.py","file_name":"h2o_xgboost_baseline.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"324024682","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 30 18:17:51 2020\r\n\r\n@author: Utilisateur\r\n\"\"\"\r\n\r\n\r\n\r\nimport csv \r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport torch\r\nfrom torch.utils import data\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nfrom math import sqrt \r\n\r\nnp.random.seed(1)\r\n\r\n\r\n\r\n# \"Pour fichier text\"\r\ndata_file_path = 'C:/Users/Utilisateur/Documents/IA/V2O3_Pump_0.9mW_Probe_0.05mW8Step_1micron_210918_Adjusted.txt'\r\n# data = pd.read_csv(data_file_path, header = None,index_col = None, sep ='\\t')\r\n\r\n# \" pour fichier ADF\"\r\n# data2_file_path = 'C:/Users/Utilisateur/Documents/IA/V2O3_Pump_0.05mW_Probe_0.05mW_Step_1micron_26091800.ADF'\r\n# data2 = pd.read_csv(data2_file_path,header = None, index_col = None , sep = '\\t', skiprows = lambda x : x<= 9)\r\n\r\n\r\n\r\ndef conversion(L): # permet le traitement de fichier csv comme une liste\r\n T=[]\r\n for i in L:\r\n T.append(float(i.replace(',','.')))\r\n return(T)\r\n \r\ntime,ampl,phase =[],[],[]\r\nwith open(data_file_path, 'r') as csvfile: #ce programme va permettre de traiter tout enregistrement des deux capteurs\r\n spamreader = csv.reader(csvfile, delimiter='\\t')\r\n for row in spamreader:\r\n time.append(row[0])\r\n ampl.append(row[1])\r\n phase.append(row[2])\r\n \r\ntime=conversion(time) \r\nampl=conversion(ampl)\r\nphase = conversion(phase)\r\n\r\n\r\ntime_vec= np.array(time)\r\nampl_vec = np.array(ampl)\r\nphase_vec = np.array(phase)\r\n\r\nX_train = time_vec[:200]\r\nX_val = time_vec[200:281]\r\nX_test = time_vec[281:361]\r\n\r\ny_train = ampl_vec[:200]\r\ny_val = ampl_vec[200:281]\r\ny_test = ampl_vec[281:361]\r\n\r\n# calcul de mean_X_train et std_X_train, idem pour y\r\n \r\n\r\nmean_X_train = np.mean(X_train)\r\nstd_X_train = np.std(X_train)\r\n\r\nmean_y_train = np.mean(y_train)\r\nstd_y_train =np.std(y_train)\r\n\r\n\r\n\r\n\r\nclass 
MyDataset(data.Dataset):\r\n\r\n#Characterizes a dataset for Pytorch\r\n def __init__(self, data_feature, data_target):\r\n #Initialization\r\n self.data_feature = data_feature\r\n self.data_target = data_target\r\n # self.transformed_feature = self.transforms_feature()\r\n # self.transformed_target = self.transforms_target()\r\n \r\n def __len__(self):\r\n #Denotes the total number of samples\r\n return len(self.data_feature)\r\n \r\n def __getitem__(self, index):\r\n #Generates one sample of data\r\n # Select sample\r\n # data_feature = torch.from_numpy(self.transformed_feature[index]).float()\r\n # data_target = torch.from_numpy(self.transformed_target[index]).float()\r\n # return data_feature, data_target\r\n X_train_normalized = (self.data_feature[index] - mean_X_train) / std_X_train\r\n y_train_normalized = (self.data_target[index] - mean_y_train) / std_y_train\r\n return torch.from_numpy(np.array(X_train_normalized,ndmin=1)).float(), torch.from_numpy(np.array(y_train_normalized, ndmin = 1)).float()\r\n \r\n \r\ntraining_set = MyDataset(X_train,y_train) # on charge nos données\r\ntrain_loading = torch.utils.data.DataLoader(training_set, batch_size= 100)\r\n \r\nval_set = MyDataset(X_val, y_val) \r\nval_loading = torch.utils.data.DataLoader(val_set, batch_size= 100)\r\n \r\ntest_set = MyDataset(X_test,y_test) \r\ntest_loading = torch.utils.data.DataLoader(test_set, batch_size= 100)\r\n\r\n\r\n# Ecriture du réseau de neurones (reprise du tp_deep)\r\n\r\n\r\nclass Net(nn.Module):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n self.FC1 = nn.Linear(1,6)\r\n self.FC2 = nn.Linear(6, 1)\r\n def forward(self, x):\r\n x = F.relu(self.FC1(x)) \r\n x = self.FC2(x)\r\n return x\r\n\r\nmodel = Net()\r\n\r\n\r\ncriterion = nn.MSELoss()\r\n#optimizer = torch.optim.SGD(model.parameters(),lr=0.0001, weight_decay= 0.001, momentum = 0.9)\r\noptimizer = torch.optim.Adam(model.parameters(), lr = 0.03,\r\n weight_decay = 0.001) \r\n\r\nloss_list_train = 
[]\r\nloss_list_val = []\r\nloss_list= []\r\nloss_list_test = []\r\n\r\ndef train(net, train_loader, optimizer, epoch):\r\n net.train()\r\n total_loss=0\r\n for idx,(data, target) in enumerate(train_loader, 0):\r\n #data, target = data.to(device), target.to(device)\r\n optimizer.zero_grad()\r\n outputs = net(data)\r\n loss = criterion(outputs,target)\r\n loss.backward()\r\n total_loss +=loss.cpu().item()\r\n optimizer.step()\r\n loss_list_train.append(total_loss/len(train_loader))\r\n #torch.optim.lr_scheduler.step()\r\n #print('Epoch:', epoch , 'average training loss ', total_loss/ len(train_loader))\r\n\r\n\r\ndef test(net,test_loader,L):\r\n net.eval()\r\n total_loss = 0\r\n for idx,(data, target) in enumerate(test_loader,0):\r\n outputs = net(data)\r\n outputs = outputs * std_X_train + mean_X_train\r\n target = target * std_y_train + mean_y_train\r\n loss = criterion(outputs,target)\r\n total_loss += sqrt(loss.cpu().item())\r\n L.append(total_loss/len(test_loader))\r\n #print('average testing loss', total_loss/len(test_loader))\r\n \r\ndef test_no_norm(net,test_loader,L):\r\n net.eval()\r\n total_loss = 0\r\n for idx,(data, target) in enumerate(test_loader,0):\r\n outputs = net(data)\r\n loss = criterion(outputs,target)\r\n total_loss += sqrt(loss.cpu().item())\r\n L.append(total_loss/len(test_loader))\r\n #print('average testing loss', total_loss/len(test_loader))\r\n \r\n \r\nfor epoch in range(50): \r\n train(model,train_loading,optimizer,epoch)\r\n test(model,val_loading,loss_list_val)\r\n test_no_norm(model, val_loading,loss_list)\r\n test_no_norm(model,test_loading,loss_list_test)\r\nprint('Epoch:', epoch , 'average training loss ', loss_list_train[-1])\r\nprint( 'average testing loss ', loss_list_val[-1])\r\n \r\n\r\nplt.figure(2)\r\nplt.plot(loss_list_train,'r',label = 'Training loss')\r\n# plt.plot(loss_list,'g',label = ' Validation loss')\r\n# plt.plot(loss_list_test,'b',label = ' Testing loss')\r\n# plt.legend()\r\n 
\r\n","sub_path":"nn_data_bosch.py","file_name":"nn_data_bosch.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"409788173","text":"from types import SimpleNamespace\nfrom django.contrib.contenttypes.models import ContentType\nfrom exponent_server_sdk import PushMessage, PushClient\nfrom comments.models import Comment, Reply\nfrom common.abstract_models import AbstractPostModel\nfrom messaging.models import Message\nfrom notifications.models import Notification\nfrom notifications.serializers import NotificationSerializer\n\n\n# A function that sends push notifications to all tagged users for a shared item\ndef create_tagged_user_notification(\n created_data: AbstractPostModel or Comment or Message or Reply,\n) -> None:\n tagged_users = created_data.tagged_users.all()\n if len(tagged_users) > 0:\n content_type = ContentType.objects.get_for_model(created_data)\n if content_type.model in [\"ad\", \"event\"]:\n content = f\"{created_data.creator.preferred_name} has tagged you in an {content_type.model}!\"\n else:\n content = f\"{created_data.creator.preferred_name} has tagged you in a {content_type.model}!\"\n notification = Notification(\n content=content,\n content_type=content_type,\n object_id=created_data.id,\n creator_id=created_data.creator.id,\n )\n notification.save()\n notification.receivers.add(*tagged_users)\n send_push_notifications(notification)\n\n\n# A function that sends push notifications to the receivers of a notification object\ndef send_push_notifications(notification: Notification) -> None:\n push_messages = []\n request = SimpleNamespace()\n request.user = notification.creator\n for receiver in notification.receivers.all():\n for device in receiver.devices.all():\n push_messages.append(\n PushMessage(\n to=device.expo_push_token,\n body=notification.content,\n data=dict(\n NotificationSerializer(\n notification, context={\"request\": request}\n ).data\n ),\n )\n )\n\n 
PushClient().publish_multiple(push_messages)\n","sub_path":"api/src/common/notification_helpers.py","file_name":"notification_helpers.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"618840553","text":"from flask import Flask, abort\nfrom flask.ext import restful\nfrom flask.ext.restful import reqparse\nimport requests\nimport collections\nimport json\nimport pygerduty\n\npagerdutyAPIKey = 'xxx'\npagerdutyOrg = 'xxx'\n\nclass PagerDutyIncidentsAPI(restful.Resource):\n def __init__(self):\n self.pg = pygerduty.PagerDuty(pagerdutyOrg, pagerdutyAPIKey)\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('user', type=str, default=\"paul.schooss@klout.com\", help=\"Pager Duty username preforming action\")\n self.parser.add_argument('action', type=str, default=\"NOOP\", help=\"Action for the incident(s), resolve and acknowledge are valid\")\n self.parser.add_argument('type', type=str, default='triggered,acknowledged', help=\"Type of incident e.g. triggered, acknowledge, etc.\")\n super(PagerDutyIncidentsAPI, self).__init__()\n\n def get(self):\n args = self.parser.parse_args()\n return [p.to_json() for p in pg.incidents.list(status=args['type'])]\n\n def put(self):\n args = self.parser.parse_args()\n me = next(self.pg.users.list(query=args['user'], limit=1))\n if args['action'] == 'resolve':\n for incident in self.pg.incidents.list(status=args['type'],assigned_to_user=me.id):\n incident.resolve(requester_id=me.id)\n return \"All incidents resolved for user %s\" % args['user']\n if args['action'] == 'acknowledge':\n for incident in self.pg.incidents.list(status=args['type'],assigned_to_user=me.id):\n incident.acknowledge(requester_id=me.id)\n return \"All incidents acknowledged for user %s\" % args['user']\n return \"Action not valid: %s\" % args['action']\n\n# class PagerDutyIncidentAPI(restful.Resource):\n# def __init__(self):\n# self.parser = reqparse.RequestParser()\n# self.parser.add_argument('user', type=str, default=\"PI8NLOA\", location=\"json\")\n# super(TaskAPI, self).__init__()\n# def put(self,incidentNumber):\n# 
pass\n","sub_path":"resources/pagerduty.py","file_name":"pagerduty.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"613958087","text":"from __future__ import print_function\n###############################################################\n#\n# Skeleton top job options for ESD->AOD\n# Put here outputs that require rec.doAOD=True\n#\n# New version for revamped job transforms\n#\n# $Id: skeleton.ESDtoAOD_tf.py 700697 2015-10-15 09:48:11Z lerrenst $\n#\n#==============================================================\n\n# Common job options disable most RecExCommon by default. Re-enable below on demand.\ninclude(\"RecJobTransforms/CommonRecoSkeletonJobOptions.py\")\nrec.doAOD=True\n\n#from AthenaCommon.Logging import logging\nimport logging\nrecoLog = logging.getLogger('esd_to_aod')\nrecoLog.info( '****************** STARTING ESD->AOD MAKING *****************' )\n\nfrom AthenaCommon.AppMgr import ServiceMgr; import AthenaPoolCnvSvc.AthenaPool\nfrom AthenaCommon.AthenaCommonFlags import athenaCommonFlags\n\n## Input\nif hasattr(runArgs,\"inputFile\"): athenaCommonFlags.FilesInput.set_Value_and_Lock( runArgs.inputFile )\nif hasattr(runArgs,\"inputESDFile\"):\n globalflags.InputFormat.set_Value_and_Lock('pool')\n rec.readESD.set_Value_and_Lock( True )\n rec.readRDO.set_Value_and_Lock( False )\n athenaCommonFlags.PoolESDInput.set_Value_and_Lock( runArgs.inputESDFile )\n\n## Pre-exec\nif hasattr(runArgs,\"preExec\"):\n recoLog.info(\"transform pre-exec\")\n for cmd in runArgs.preExec:\n recoLog.info(cmd)\n exec(cmd)\n\n## Pre-include\nif hasattr(runArgs,\"preInclude\"): \n for fragment in runArgs.preInclude:\n print(\"preInclude\",fragment)\n include(fragment)\n\n## Outputs\nif hasattr(runArgs,\"outputAODFile\"):\n rec.doAOD.set_Value_and_Lock( True )\n rec.doWriteAOD.set_Value_and_Lock( True ) \n athenaCommonFlags.PoolAODOutput.set_Value_and_Lock( runArgs.outputAODFile )\n # Begin temporary trigger block\n if TriggerFlags.doMT():\n # Don't run any trigger - only pass the HLT contents from ESD to AOD\n from RecExConfig.RecAlgsFlags import recAlgs\n 
recAlgs.doTrigger.set_Value_and_Lock( False )\n rec.doTrigger.set_Value_and_Lock( False )\n # Add HLT output\n from TriggerJobOpts.HLTTriggerResultGetter import HLTTriggerResultGetter\n hltOutput = HLTTriggerResultGetter()\n # Add Trigger menu metadata\n if rec.doFileMetaData():\n from RecExConfig.ObjKeyStore import objKeyStore\n metadataItems = [ \"xAOD::TriggerMenuContainer#TriggerMenu\",\n \"xAOD::TriggerMenuAuxContainer#TriggerMenuAux.\" ]\n objKeyStore.addManyTypesMetaData( metadataItems )\n else: # not TriggerFlags.doMT()\n pass # See TriggerJobOpts/python/TriggerGetter.py for Run 2. Called by RecExCommon\n\nif hasattr(runArgs,\"outputTAGFile\"):\n # should be used as outputTAGFile_e2a=myTAG.root so that it does not trigger AODtoTAG\n # if writing TAG file, need AOD object in any case\n rec.doAOD.set_Value_and_Lock( True )\n rec.doWriteTAG.set_Value_and_Lock( True )\n athenaCommonFlags.PoolTAGOutput.set_Value_and_Lock( runArgs.outputTAGFile )\n\nif hasattr(runArgs,\"tmpAOD\"):\n rec.doAOD.set_Value_and_Lock( True )\n rec.doWriteAOD.set_Value_and_Lock( True ) \n athenaCommonFlags.PoolAODOutput.set_Value_and_Lock( runArgs.tmpAOD )\n\nif hasattr(runArgs,\"outputHIST_AOD_INTFile\"):\n rec.doMonitoring.set_Value_and_Lock(True)\n from AthenaMonitoring.DQMonFlags import DQMonFlags\n DQMonFlags.histogramFile.set_Value_and_Lock( runArgs.outputHIST_AOD_INTFile )\n\nif hasattr(runArgs,\"outputNTUP_BTAGFile\"):\n from BTagging.BTaggingFlags import BTaggingFlags\n BTaggingFlags.doJetTagNtuple = True\n BTaggingFlags.JetTagNtupleName = runArgs.outputNTUP_BTAGFile\n\nif hasattr(runArgs, \"outputNTUP_HIGHMULTFile\"):\n from TrigMbD3PDMaker.TrigMbD3PDMakerFlags import trigMbD3PDflags\n trigMbD3PDflags.FileName=runArgs.outputNTUP_HIGHMULTFile\n include(\"TrigMbD3PDMaker/HighMultD3PD_jobOptions.py\")\n\nif hasattr(runArgs,\"outputNTUP_ENHBIASFile\"):\n from TrigCostAthena.TrigCostAthenaFlags import TrigCostAthenaFlags\n 
TrigCostAthenaFlags.StoreNtVerticesOutputFile.set_Value_and_Lock( runArgs.outputNTUP_ENHBIASFile )\n TrigCostAthenaFlags.DoStoreNtVertices.set_Value_and_Lock( True )\n if hasattr(runArgs,\"inputESDFile\") and not hasattr(runArgs,\"inputFile\"):\n athenaCommonFlags.FilesInput.set_Value_and_Lock( runArgs.inputESDFile )\n include(\"TrigCostAthena/ESDtoNTUP_ENHBIAS.py\")\n\nif hasattr(runArgs,\"outputHIST_PHYSVALMONFile\"):\n rec.doPhysValMonHists=True\n \n ## Setup the output file(s):\n from GaudiSvc.GaudiSvcConf import THistSvc\n svcMgr += THistSvc()\n output=svcMgr.THistSvc.Output\n svcMgr.THistSvc.Output+= [\"PhysValMon DATAFILE='\"+runArgs.outputHIST_PHYSVALMONFile+\"' OPT='RECREATE'\"]\n # now done in RecExCommon_topOption to ensure the right ordering of algs.\n # include(\"PhysValMon/PhysValMon_RecoOpt.py\")\n \nif hasattr(runArgs, 'outputXML_JiveXMLFile'):\n jp.Rec.doJiveXML.set_Value_and_Lock(True)\n\nrec.OutputFileNameForRecoStep=\"ESDtoAOD\"\n\n#========================================================\n# Central topOptions (this is one is a string not a list)\n#========================================================\nif hasattr(runArgs,\"topOptions\"): include(runArgs.topOptions)\nelse: include( \"RecExCommon/RecExCommon_topOptions.py\" )\n\n# Remove unwanted back navigation to ESD when ESD is temporary\nif hasattr(runArgs,\"outputAODFile\"):\n if hasattr(runArgs,\"ESDFileIO\") and runArgs.ESDFileIO == \"temporary\":\n try:\n StreamAOD.ExtendProvenanceRecord = False\n except:\n recoLog.info(\"StreamAOD was not defined, cannot set ExtendProvenanceRecord = False. 
Check your flags.\")\n\n#D3PDMaker outputs\nif hasattr(runArgs,\"outputNTUP_MINBIASFile\"):\n from D3PDMakerConfig.D3PDProdFlags import prodFlags\n prodFlags.WriteMinBiasD3PD.FileName = runArgs.outputNTUP_MINBIASFile\n prodFlags.WriteMinBiasD3PD.set_Value_and_Lock( True )\n include( prodFlags.WriteMinBiasD3PD.DPDMakerScript )\n pass\n\nif hasattr(runArgs,\"outputNTUP_TRIGFile\"):\n from D3PDMakerConfig.D3PDProdFlags import prodFlags\n prodFlags.WriteTriggerD3PD.FileName = runArgs.outputNTUP_TRIGFile\n prodFlags.WriteTriggerD3PD.set_Value_and_Lock( True )\n include( prodFlags.WriteTriggerD3PD.DPDMakerScript )\n pass\n\nif hasattr(runArgs,\"outputDESDM_BEAMSPOTFile\"):\n #needs to be used with: preInclude=InDetBeamSpotFinder/BeamSpotRecoPreInclude_standard.py\n from InDetBeamSpotFinder import BeamSpotDPDFlags \n primDPD.WriteDESDM_BEAMSPOTStream.FileName=runArgs.outputDESDM_BEAMSPOTFile\n primDPD.WriteDESDM_BEAMSPOTStream.set_Value_and_Lock( True )\n include(\"InDetBeamSpotFinder/DESDM_BEAMSPOTFragment.py\")\n\n#==========================================================\n# Use LZIB for compression of temporary outputs of AthenaMP\n#==========================================================\nif hasattr(runArgs, \"outputAODFile\") and '_000' in runArgs.outputAODFile:\n ServiceMgr.AthenaPoolCnvSvc.PoolAttributes += [ \"DatabaseName = '\" + athenaCommonFlags.PoolAODOutput()+ \"'; COMPRESSION_ALGORITHM = '1'\" ]\n ServiceMgr.AthenaPoolCnvSvc.PoolAttributes += [ \"DatabaseName = '\" + athenaCommonFlags.PoolAODOutput()+ \"'; COMPRESSION_LEVEL = '1'\" ]\n\n## Post-include\nif hasattr(runArgs,\"postInclude\"): \n for fragment in runArgs.postInclude:\n include(fragment)\n\n## Post-exec\nif hasattr(runArgs,\"postExec\"):\n recoLog.info(\"transform post-exec\")\n for cmd in runArgs.postExec:\n recoLog.info(cmd)\n 
exec(cmd)\n","sub_path":"Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD_tf.py","file_name":"skeleton.ESDtoAOD_tf.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"163018388","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n#set title\n\nimage = Image.open('images/oyster.png')\nst.image(image, width = 800)\n\ndef main():\n activities = ['Intro to the Chesapeake Bay Challenge', 'Data Preparation',\n 'Data Visualization', 'Total Nitrogen Model']\n option = st.sidebar.selectbox('Selection Option:', activities)\n\n#Intro\n if option == 'Intro to the Chesapeake Bay Challenge':\n st.title('Intro to the Chesapeake Bay Challenge')\n title_page = \"\"\"\n \n Intro to the Chesapeake Bay Challenge
\n \n \"\"\"\n st.markdown(title_page,unsafe_allow_html=True)\n\n if st.sidebar.checkbox('Sidebar'):\n html_temp = \"\"\"\n \n Sidebar
\n \n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n title_write = \"\"\"\n put writing here Jen\n \"\"\"\n\n st.markdown(title_write,unsafe_allow_html=True)\n\n\n#Data Preparation\n elif option == 'Data Preparation':\n st.title('Data Preparation')\n html_temp = \"\"\"\n \n Data Preparation
\n \n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n if st.sidebar.checkbox('Sidebar'):\n html_temp = \"\"\"\n \n Sidebar
\n \n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n\n explorationwrite_up = \"\"\"\n Jen write here\n \"\"\"\n st.markdown(explorationwrite_up, unsafe_allow_html=True)\n\n image = Image.open('images/oyster2.png')\n st.image(image, width = 800)\n\n\n#Data Visualization\n elif option == 'Data Visualization':\n st.title('Data Visualization')\n html_temp = \"\"\"\n \n Data Visualization
\n \n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n if st.sidebar.checkbox('Sidebar'):\n html_temp = \"\"\"\n \n Sidebar
\n \n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n\n vizwrite_up = \"\"\"\n Jen write here\n ```python\n This is how I write code here.\n ```\n \"\"\"\n st.markdown(vizwrite_up, unsafe_allow_html=True)\n\n image = Image.open('images/oyster2.png')\n st.image(image, width = 800)\n\n#Nitrogen Modeling\n elif option == 'Total Nitrogen Model':\n st.title('Total Nitrogen Model')\n html_temp = \"\"\"\n \n Total Nitrogen Model
\n \n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n if st.sidebar.checkbox('Sidebar'):\n html_temp = \"\"\"\n \n Sidebar
\n \n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n\n\n modelwrite_up = \"\"\"\n Jen write here\n \"\"\"\n st.markdown(modelwrite_up, unsafe_allow_html=True)\n\n image = Image.open('images/oyster2.png')\n st.image(image, width = 800)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hackthebay.py","file_name":"hackthebay.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"229359526","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\n# -------------------------------------------------------------------------\n\n\nfrom ..idasix import QtWidgets\n\nfrom .base import BaseDialog\n\n\nclass GuiDialog(BaseDialog, QtWidgets.QDialog):\n\n def __init__(self, title=\"\", modal=True, **kwargs):\n super(GuiDialog, self).__init__(**kwargs)\n self.setModal(modal)\n self.setWindowTitle(title)\n self.response = None\n self.statusLbl = None\n\n self.base_layout = QtWidgets.QVBoxLayout()\n self.setLayout(self.base_layout)\n\n def bottom_layout(self, ok_text=\"&Ok\", cencel_text=\"&Cancel\"):\n self.statusLbl = QtWidgets.QLabel()\n self.base_layout.addWidget(self.statusLbl)\n\n ok_btn = QtWidgets.QPushButton(ok_text)\n ok_btn.setDefault(True)\n cancel_btn = QtWidgets.QPushButton(cencel_text)\n size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,\n QtWidgets.QSizePolicy.Fixed)\n ok_btn.setSizePolicy(size_policy)\n cancel_btn.setSizePolicy(size_policy)\n button_lyt = QtWidgets.QHBoxLayout()\n button_lyt.addWidget(ok_btn)\n button_lyt.addWidget(cancel_btn)\n self.base_layout.addLayout(button_lyt)\n\n ok_btn.clicked.connect(self.submit_base)\n cancel_btn.clicked.connect(self.reject)\n\n def exception_base(self, exception):\n super(GuiDialog, self).exception_base(exception)\n if hasattr(exception, 'errors'):\n errors = (\"{}: {}\".format(k, \", \".join(v))\n for k, v in exception.errors())\n exception_string = \"\\t\" + \"\\n\\t\".join(errors)\n elif hasattr(exception, 'message'):\n exception_string = exception.message\n else:\n exception_string = str(exception)\n self.statusLbl.setText(\"Error(s) occured:\\n{}\".format(exception_string))\n self.statusLbl.setStyleSheet(\"color: red;\")\n","sub_path":"ida_plugins/rematch/rematch/dialogs/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"398838661","text":"import graphene\nfrom lingvodoc.schema.gql_holders import (\n LingvodocObjectType,\n CompositeIdHolder,\n CreatedAt,\n Relationship,\n SelfHolder,\n FieldHolder,\n ParentLink,\n MarkedForDeletion,\n Position,\n client_id_check,\n del_object,\n ResponseError,\n LingvodocID,\n)\nfrom lingvodoc.models import (\n DBSession,\n DictionaryPerspectiveToField as dbDictionaryPerspectiveToField,\n)\n\nfrom lingvodoc.utils.creation import create_dictionary_persp_to_field\n\nclass Column(LingvodocObjectType):\n \"\"\"\n #created_at | timestamp without time zone | NOT NULL\n #object_id | bigint | NOT NULL\n #client_id | bigint | NOT NULL\n #parent_object_id | bigint |\n #parent_client_id | bigint |\n #self_client_id | bigint |\n #self_object_id | bigint |\n #field_client_id | bigint | NOT NULL\n #field_object_id | bigint | NOT NULL\n #link_client_id | bigint |\n #link_object_id | bigint |\n #marked_for_deletion | boolean | NOT NULL\n #position | integer | NOT NULL\n \"\"\"\n dbType = dbDictionaryPerspectiveToField\n\n class Meta:\n interfaces = (CreatedAt,\n CompositeIdHolder,\n Relationship,\n SelfHolder,\n FieldHolder,\n ParentLink,\n MarkedForDeletion,\n Position)\n pass\n\n\nclass CreateColumn(graphene.Mutation):\n \"\"\"\n example:\n mutation {\n create_column(parent_id: [1204,19664], field_id: [66, 6],\n position: 1) {\n triumph\n column{\n id\n position\n }\n }\n }\n\n (this example works)\n returns:\n\n {\n \"create_column\": {\n \"triumph\": true,\n \"column\": {\n \"id\": [\n 949,\n 2493\n ],\n \"position\": 1\n }\n }\n }\n \"\"\"\n\n class Arguments:\n id = LingvodocID()\n parent_id = LingvodocID(required=True)\n field_id = LingvodocID(required=True)\n self_id = LingvodocID()\n link_id = LingvodocID()\n position = graphene.Int(required=True)\n\n column = graphene.Field(Column)\n triumph = graphene.Boolean()\n\n @staticmethod\n @client_id_check()\n def mutate(root, info, **args):\n id = args.get(\"id\")\n client_id = id[0] if id else 
info.context[\"client_id\"]\n object_id = id[1] if id else None\n id = [client_id, object_id]\n parent_id = args.get('parent_id')\n info.context.acl_check('edit', 'perspective', parent_id)\n field_id = args.get('field_id')\n self_id = args.get('self_id')\n link_id = args.get('link_id')\n position = args.get('position')\n field_object = create_dictionary_persp_to_field(id=id,\n parent_id=parent_id,\n field_id=field_id,\n self_id=self_id,\n link_id=link_id,\n position=position)\n DBSession.add(field_object)\n DBSession.flush()\n column = Column(id=[field_object.client_id, field_object.object_id])\n column.dbObject = field_object\n return CreateColumn(column=column, triumph=True)\n\n\nclass UpdateColumn(graphene.Mutation):\n \"\"\"\n example:\n mutation {\n update_column(id: [949, 2493], position: 5) {\n triumph\n perspective_to_field{\n id\n position\n }\n }\n }\n\n (this example works)\n returns:\n\n {\n \"update_column\": {\n \"triumph\": true,\n \"column\": {\n \"id\": [\n 949,\n 2493\n ],\n \"position\": 5\n }\n }\n }\n \"\"\"\n\n class Arguments:\n id = LingvodocID(required=True)\n parent_id = LingvodocID()\n field_id = LingvodocID()\n self_id = LingvodocID()\n link_id = LingvodocID()\n position = graphene.Int()\n\n column = graphene.Field(Column)\n triumph = graphene.Boolean()\n\n @staticmethod\n def mutate(root, info, **args):\n id = args.get(\"id\")\n client_id, object_id = id\n field_object = DBSession.query(dbDictionaryPerspectiveToField).filter_by(client_id=client_id,\n object_id=object_id).first()\n if not field_object or field_object.marked_for_deletion:\n raise ResponseError(message=\"Error: No such field object in the system\")\n\n info.context.acl_check('edit', 'perspective',\n (field_object.parent_client_id, field_object.parent_object_id))\n field_id = args.get('field_id')\n self_id = args.get('self_id')\n link_id = args.get('link_id')\n position = args.get('position')\n if field_id:\n field_object.field_client_id, field_object.field_object_id = 
field_id\n\n # Attaching or de-attaching as a nested field.\n\n if self_id:\n\n field_object.self_client_id, field_object.self_object_id = (\n self_id if self_id[0] > 0 else (None, None))\n\n if link_id:\n field_object.link_client_id, field_object.link_object_id = link_id\n if position:\n field_object.position = position\n column = Column(id=[field_object.client_id, field_object.object_id])\n column.dbObject = field_object\n return UpdateColumn(column=column, triumph=True)\n\n\n\nclass DeleteColumn(graphene.Mutation):\n \"\"\"\n example:\n mutation {\n delete_column(id: [949, 2493]) {\n triumph\n column{\n id\n }\n }\n }\n\n (this example works)\n returns:\n\n {\n \"delete_column\": {\n \"triumph\": true,\n \"column\": {\n \"id\": [\n 949,\n 2493\n ]\n }\n }\n }\n \"\"\"\n class Arguments:\n id = LingvodocID(required=True)\n\n column = graphene.Field(Column)\n triumph = graphene.Boolean()\n\n @staticmethod\n def mutate(root, info, **args):\n id = args.get('id')\n client_id, object_id = id\n column_object = DBSession.query(dbDictionaryPerspectiveToField).filter_by(client_id=client_id,\n object_id=object_id).first()\n perspective_ids = (column_object.parent_client_id, column_object.parent_object_id)\n info.context.acl_check('edit', 'perspective', perspective_ids)\n if not column_object or column_object.marked_for_deletion:\n raise ResponseError(message=\"No such column object in the system\")\n del_object(column_object, \"delete_column\", info.context.get('client_id'))\n column = Column(id=id)\n column.dbObject = column_object\n return DeleteColumn(column=column, triumph=True)\n\n","sub_path":"lingvodoc/schema/gql_column.py","file_name":"gql_column.py","file_ext":"py","file_size_in_byte":7320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"513500207","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ContainerGroupPropertiesInstanceView(Model):\n \"\"\"The instance view of the container group. Only valid in response.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar events: The events of this container group.\n :vartype events: list of :class:`Event\n `\n :ivar state: The state of the container group. Only valid in response.\n :vartype state: str\n \"\"\"\n\n _validation = {\n 'events': {'readonly': True},\n 'state': {'readonly': True},\n }\n\n _attribute_map = {\n 'events': {'key': 'events', 'type': '[Event]'},\n 'state': {'key': 'state', 'type': 'str'},\n }\n\n def __init__(self):\n self.events = None\n self.state = None\n","sub_path":"azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/container_group_properties_instance_view.py","file_name":"container_group_properties_instance_view.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"464295478","text":"# coding: utf-8\nimport datetime\nimport hashlib\n\nfrom flask import Blueprint, render_template, request, redirect, Response, jsonify\nfrom flask_login import login_required\nfrom werkzeug.utils import secure_filename\nfrom pinyin import pinyin\n\nfrom app import db\nfrom app.model.ResultModel import ResultModel\nfrom app.entity.ImageEntity import ImageEntity\n\nimage_bp = Blueprint('image', __name__)\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ['png', 'jpg', 'jpeg', 'gif']\n\n\n@image_bp.route('/attach/upload', methods=['GET', 'POST'])\n@login_required\ndef image_upload():\n if request.method == 'POST':\n # 通过POST方式上传图片\n image_file = request.files['file']\n filename = secure_filename(pinyin.get(image_file.filename, format=\"numerical\"))\n\n if allowed_file(filename):\n image_entity = ImageEntity()\n image_entity.name = image_file.filename\n image_entity.type = image_file.content_type\n image_entity.data = image_file.read()\n image_entity.time = datetime.datetime.now()\n\n md5 = hashlib.md5()\n md5.update(filename + datetime.datetime.strftime(image_entity.time, '%Y%m%d%H%M%S'))\n image_entity.md5_name = md5.hexdigest() + '.' 
+ filename.rsplit('.', 1)[1]\n\n db.session.add(image_entity)\n db.session.commit()\n\n return redirect('/attach/' + image_entity.md5_name)\n else:\n result = ResultModel(ResultModel.FAILED_CODE, '图片格式不合法', None)\n return jsonify(vars(result))\n\n return render_template('upload.html')\n\n\n@image_bp.route('/attach/')\ndef image(md5_name):\n image_entity = ImageEntity.query.filter_by(md5_name=md5_name).first_or_404()\n return Response(image_entity.data, mimetype=image_entity.type)\n\n\n@image_bp.route('/attaches')\n@login_required\ndef archives():\n image_list = ImageEntity.query.filter(ImageEntity.time >= datetime.date.today())\\\n .order_by(ImageEntity.time.desc()).all()\n return render_template('attaches.html', image_list=image_list)\n\n\n","sub_path":"app/controller/image_controller.py","file_name":"image_controller.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"622535595","text":"\"\"\"\nCustom Authenticator to use Google OAuth with JupyterHub.\n\nDerived from the GitHub OAuth authenticator.\n\"\"\"\n\nimport os\nimport json\nimport urllib.parse\n\nfrom tornado import gen\nfrom tornado.httpclient import HTTPRequest, AsyncHTTPClient\nfrom tornado.auth import GoogleOAuth2Mixin\nfrom tornado.web import HTTPError\n\nfrom traitlets import Dict, Unicode, List, default, validate\n\nfrom jupyterhub.auth import LocalAuthenticator\nfrom jupyterhub.utils import url_path_join\n\nfrom .oauth2 import OAuthLoginHandler, OAuthCallbackHandler, OAuthenticator\n\ndef check_user_in_groups(member_groups, allowed_groups):\n # Check if user is a member of any group in the allowed groups\n if any(g in member_groups for g in allowed_groups):\n return True # user _is_ in group\n else:\n return False\n\n\nclass GoogleOAuthenticator(OAuthenticator, GoogleOAuth2Mixin):\n google_api_url = Unicode(\"https://www.googleapis.com\", config=True)\n\n @default('google_api_url')\n def _google_api_url(self):\n \"\"\"get default google apis url from env\"\"\"\n google_api_url = os.getenv('GOOGLE_API_URL')\n\n # default to googleapis.com\n if not google_api_url:\n google_api_url = 'https://www.googleapis.com'\n\n return google_api_url\n\n @default('scope')\n def _scope_default(self):\n return ['openid', 'email']\n\n @default(\"authorize_url\")\n def _authorize_url_default(self):\n return \"https://accounts.google.com/o/oauth2/v2/auth\"\n\n @default(\"token_url\")\n def _token_url_default(self):\n return \"%s/oauth2/v4/token\" % (self.google_api_url)\n\n google_service_account_keys = Dict(\n Unicode(),\n help=\"Service account keys to use with each domain, see https://developers.google.com/admin-sdk/directory/v1/guides/delegation\"\n ).tag(config=True)\n\n gsuite_administrator = Dict(\n Unicode(),\n help=\"Username of a G Suite Administrator for the service account to act as\"\n ).tag(config=True)\n\n google_group_whitelist = Dict(\n 
List(Unicode()),\n help=\"Automatically whitelist members of selected groups\"\n ).tag(config=True)\n\n admin_google_groups = Dict(\n List(Unicode()),\n help=\"Groups whose members should have Jupyterhub admin privileges\"\n ).tag(config=True)\n\n user_info_url = Unicode(\n \"https://www.googleapis.com/oauth2/v1/userinfo\", config=True\n )\n\n hosted_domain = List(\n Unicode(),\n config=True,\n help=\"\"\"List of domains used to restrict sign-in, e.g. mycollege.edu\"\"\",\n )\n\n @default('hosted_domain')\n def _hosted_domain_from_env(self):\n domains = []\n for domain in os.environ.get('HOSTED_DOMAIN', '').split(';'):\n if domain:\n # check falsy to avoid trailing separators\n # adding empty domains\n domains.append(domain)\n return domains\n\n @validate('hosted_domain')\n def _cast_hosted_domain(self, proposal):\n \"\"\"handle backward-compatibility with hosted_domain is a single domain as a string\"\"\"\n if isinstance(proposal.value, str):\n # pre-0.9 hosted_domain was a string\n # set it to a single item list\n # (or if it's empty, an empty list)\n if proposal.value == '':\n return []\n return [proposal.value]\n return proposal.value\n\n login_service = Unicode(\n os.environ.get('LOGIN_SERVICE', 'Google'),\n config=True,\n help=\"\"\"Google Apps hosted domain string, e.g. 
My College\"\"\",\n )\n\n async def authenticate(self, handler, data=None, google_groups=None):\n code = handler.get_argument(\"code\")\n body = urllib.parse.urlencode(\n dict(\n code=code,\n redirect_uri=self.get_callback_url(handler),\n client_id=self.client_id,\n client_secret=self.client_secret,\n grant_type=\"authorization_code\",\n )\n )\n\n http_client = AsyncHTTPClient()\n\n response = await http_client.fetch(\n self.token_url,\n method=\"POST\",\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"},\n body=body,\n )\n\n user = json.loads(response.body.decode(\"utf-8\", \"replace\"))\n access_token = str(user['access_token'])\n\n response = await http_client.fetch(\n self.user_info_url + '?access_token=' + access_token\n )\n\n if not response:\n handler.clear_all_cookies()\n raise HTTPError(500, 'Google authentication failed')\n\n bodyjs = json.loads(response.body.decode())\n user_email = username = bodyjs['email']\n user_email_domain = user_email.split('@')[1]\n\n if not bodyjs['verified_email']:\n self.log.warning(\"Google OAuth unverified email attempt: %s\", user_email)\n raise HTTPError(403, \"Google email {} not verified\".format(user_email))\n\n if self.hosted_domain:\n if user_email_domain not in self.hosted_domain:\n self.log.warning(\n \"Google OAuth unauthorized domain attempt: %s\", user_email\n )\n raise HTTPError(\n 403,\n \"Google account domain @{} not authorized.\".format(\n user_email_domain\n ),\n )\n if len(self.hosted_domain) == 1:\n # unambiguous domain, use only base name\n username = user_email.split('@')[0]\n\n user_info = {\n 'name': username,\n 'auth_state': {'access_token': access_token, 'google_user': bodyjs}\n }\n\n if self.admin_google_groups or self.google_group_whitelist:\n user_info = await self._add_google_groups_info(user_info, google_groups)\n\n return user_info\n\n def _service_client_credentials(self, scopes, user_email_domain):\n \"\"\"\n Return a configured service client credentials for the API.\n 
\"\"\"\n try:\n from google.oauth2 import service_account\n except:\n raise ImportError(\n \"Could not import google.oauth2's service_account,\"\n \"you may need to run pip install oauthenticator[googlegroups] or not declare google groups\"\n )\n\n gsuite_administrator_email = \"{}@{}\".format(self.gsuite_administrator[user_email_domain], user_email_domain)\n self.log.debug(\"scopes are %s, user_email_domain is %s\", scopes, user_email_domain)\n credentials = service_account.Credentials.from_service_account_file(\n self.google_service_account_keys[user_email_domain],\n scopes=scopes\n )\n\n credentials = credentials.with_subject(gsuite_administrator_email)\n\n return credentials\n\n def _service_client(self, service_name, service_version, credentials, http=None):\n \"\"\"\n Return a configured service client for the API.\n \"\"\"\n try:\n from googleapiclient.discovery import build\n except:\n raise ImportError(\n \"Could not import googleapiclient.discovery's build,\"\n \"you may need to run pip install oauthenticator[googlegroups] or not declare google groups\"\n )\n\n self.log.debug(\"service_name is %s, service_version is %s\", service_name, service_version)\n\n return build(\n serviceName=service_name,\n version=service_version,\n credentials=credentials,\n cache_discovery=False,\n http=http)\n\n async def _google_groups_for_user(self, user_email, credentials, http=None):\n \"\"\"\n Return google groups a given user is a member of\n \"\"\"\n service = self._service_client(\n service_name='admin',\n service_version='directory_v1',\n credentials=credentials,\n http=http)\n\n results = service.groups().list(userKey=user_email).execute()\n results = [ g['email'].split('@')[0] for g in results.get('groups', [{'email': None}]) ]\n self.log.debug(\"user_email %s is a member of %s\", user_email, results)\n return results\n\n async def _add_google_groups_info(self, user_info, google_groups=None):\n user_email_domain=user_info['auth_state']['google_user']['hd']\n 
user_email=user_info['auth_state']['google_user']['email']\n if google_groups is None:\n credentials = self._service_client_credentials(\n scopes=['%s/auth/admin.directory.group.readonly' % (self.google_api_url)],\n user_email_domain=user_email_domain)\n google_groups = await self._google_groups_for_user(\n user_email=user_email,\n credentials=credentials)\n user_info['auth_state']['google_user']['google_groups'] = google_groups\n\n # Check if user is a member of any admin groups.\n if self.admin_google_groups:\n is_admin = check_user_in_groups(google_groups, self.admin_google_groups[user_email_domain])\n # Check if user is a member of any whitelisted groups.\n user_in_group = check_user_in_groups(google_groups, self.google_group_whitelist[user_email_domain])\n\n if self.admin_google_groups and (is_admin or user_in_group):\n user_info['admin'] = is_admin\n return user_info\n elif user_in_group:\n return user_info\n else:\n return None\n\n\nclass LocalGoogleOAuthenticator(LocalAuthenticator, GoogleOAuthenticator):\n \"\"\"A version that mixes in local system user creation\"\"\"\n\n pass\n","sub_path":"oauthenticator/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":9665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"244844181","text":"\"\"\"\nA notepad demo\n\"\"\"\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport os\n\n\nclass Notepad:\n def __init__(self):\n\n self.root = Tk()\n self.root.title(\"Untitled\")\n\n self.cur_file_name = \"\"\n self.theme_colors = {'Default': 'Black.White', 'Great Gery': 'Gray.Alice Blue',\n 'Lovely Lavender': '#202B4B.#E1E1FF', 'Aquamarine': '#5B8340.#D1E7E0',\n 'Bold Beige': '#4B4620.#FFF0E1', 'Olive Green': '#D1E7E0.#5B8340'}\n\n self.shortcut_bar = Frame()\n self.linenum_bar = Label()\n self.info_bar = Label()\n self.text_pad = Text()\n\n # settings\n self.show_ln_num = BooleanVar(value=True) # show line number\n self.hl_cur_ln = BooleanVar(value=False) # highlight current line\n self.show_info_bar = BooleanVar(value=True) # highlight current line\n self.theme = StringVar(value=\"Default\")\n\n self.make_shortcut_bar()\n self.make_linenum_bar()\n self.make_text_pad()\n self.make_info_bar()\n self.make_menu()\n self.update()\n\n Button(text=\"123\", command=lambda: print(self.theme.get())).pack()\n\n def run(self):\n self.text_pad.focus()\n self.root.mainloop()\n\n # Text and scrollbar\n def make_text_pad(self):\n self.text_pad = Text(self.root, undo=True)\n self.text_pad.pack(expand=True, fill=BOTH)\n scroll = Scrollbar(self.text_pad)\n\n self.text_pad.configure(yscrollcommand=scroll.set)\n scroll.configure(command=self.text_pad.yview)\n scroll.pack(side=RIGHT, fill=Y)\n\n def make_shortcut_bar(self):\n self.shortcut_bar = Frame(self.root, height=25, bg='LightSeaGreen', width=800)\n\n icons = ['new', 'open', 'save', 'cut', 'paste', 'redo', 'undo']\n\n for ind, ico in enumerate(icons):\n img = PhotoImage(file='icons/' + ico + '.gif')\n cmd = eval('self.' 
+ ico)\n\n # Buttons will not show any image until you tell them twice\n # b = Button(self.shortcut_bar, image=img, command=cmd)\n # b.image = img\n # b.pack(side=LEFT, padx=10, pady=5)\n\n self.shortcut_bar.pack(side=TOP, fill=X)\n\n def make_linenum_bar(self):\n self.linenum_bar = Label(self.root, width=2, bg='OldLace', height=30)\n self.linenum_bar.pack(side=LEFT, fill=Y)\n self.text_pad.bind_all('', self.update)\n\n def make_info_bar(self):\n self.info_bar = Label(self.text_pad, text=\"Line:1 | Column:0\")\n self.info_bar.pack(anchor=SE)\n\n def make_menu(self):\n # Menus\n # -----------------------------------------------------------\n main_menu = Menu(self.root)\n\n # file_menu\n file_menu = Menu(main_menu)\n file_menu.add_command(label=\"New\", accelerator=\"Control+N\", command=self.new)\n file_menu.add_command(label=\"Open\", accelerator=\"Control+O\", command=self.open)\n file_menu.add_separator()\n file_menu.add_command(label=\"Save\", accelerator=\"Control+S\", command=self.save)\n file_menu.add_command(label=\"Save As\", accelerator=\"Shift+Control+S\", command=self.save_as)\n file_menu.add_separator()\n file_menu.add_command(label=\"Exit\", accelerator=\"Control+Q\", command=self.exit_editor)\n\n # edit_menu\n edit_menu = Menu(main_menu)\n edit_menu.add_command(label=\"Undo\", accelerator='Command+Z', command=self.undo)\n edit_menu.add_command(label=\"Redo\", accelerator='Command+Y', command=self.redo)\n edit_menu.add_command(label=\"Clear\", command=self.clear)\n edit_menu.add_separator()\n edit_menu.add_command(label=\"Cut\", accelerator='Command+X', command=self.cut)\n edit_menu.add_command(label=\"Copy\", accelerator='Command+C', command=self.copy)\n edit_menu.add_command(label=\"Paste\", accelerator='Command+V', command=self.paste)\n edit_menu.add_separator()\n edit_menu.add_command(label=\"Select All\", accelerator='Command+A', command=self.select_all)\n edit_menu.add_command(label=\"Find All\", accelerator='Control+F', command=self.on_find)\n\n 
# view_menu\n view_menu = Menu(main_menu)\n view_menu.add_checkbutton(label=\"Show Line Number\", variable=self.show_ln_num, command=self.update_linenum)\n view_menu.add_checkbutton(label=\"Highlight Current Line\", variable=self.hl_cur_ln, command=self.update_highlight)\n view_menu.add_checkbutton(label=\"Show infobar\", variable=self.show_info_bar, command=self.update_infobar)\n\n # theme_menu\n theme_menu = Menu(main_menu)\n for name in sorted(self.theme_colors):\n theme_menu.add_radiobutton(label=name, variable=self.theme, command=self.update_theme)\n\n # about_menu\n about_menu = Menu(main_menu)\n about_menu.add_command(label=\"About\", command=self.about)\n about_menu.add_command(label=\"Help\", accelerator='F1', command=self.help)\n\n # menu relationship\n view_menu.add_cascade(label=\"Theme\", menu=theme_menu)\n main_menu.add_cascade(label=\"File\", menu=file_menu)\n main_menu.add_cascade(label=\"Edit\", menu=edit_menu)\n main_menu.add_cascade(label=\"View\", menu=view_menu)\n main_menu.add_cascade(label=\"About\", menu=about_menu)\n\n self.root.configure(menu=main_menu)\n\n # Menu button callbacks\n # --------------------------------------------------------------------\n def cut(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def copy(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def paste(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def undo(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def redo(self):\n self.text_pad.event_generate(\"<>\")\n self.update()\n\n def clear(self):\n self.text_pad.delete('0.0', END)\n self.update()\n\n def select_all(self):\n self.text_pad.tag_add('sel', '1.0', END)\n\n def on_find(self):\n t = Toplevel(self.root)\n t.title(\"Find\")\n\n t.transient(self.root)\n # This makes the window always be in front of the root,even if you click back the root window\n\n Label(t, text=\"Find All:\").grid(row=0, column=0)\n\n # target entry\n target = 
StringVar()\n e = Entry(t, width=25, textvariable=target)\n e.grid(row=0, column=1, columnspan=2, padx=2, pady=2)\n e.focus_set()\n\n # Case sensitive checkbutton\n c = BooleanVar()\n Checkbutton(t, text='No Cases', variable=c).grid(row=1, column=1, padx=2, pady=2)\n\n # 'Go' button\n def find():\n t.title(\"Found %d\" % self.find_all(target.get(), c.get()))\n e.focus_set()\n Button(t, text=\" Go! \", command=find).grid(row=1, column=2)\n\n # We should override the close function in order to eliminate the colored tags\n def close_find():\n self.text_pad.tag_remove('match', '1.0', END)\n t.destroy()\n t.protocol('WM_DELETE_WINDOW', close_find)\n\n def find_all(self, target: str, no_case=False) -> int:\n \"\"\"This function highlight all target in self.text_pad, returns the count\"\"\"\n tp = self.text_pad\n tp.tag_configure('match', foreground='red', background='yellow')\n tp.tag_remove('match', '1.0', END)\n count = 0\n # if target:\n pos = '1.0'\n while True:\n # search function is given by tkinter\n pos = tp.search(target, pos, END, nocase=no_case)\n if not pos:\n break\n lastpos = '%s+%dc' % (pos, len(target))\n tp.tag_add('match', pos, lastpos)\n count += 1\n pos = lastpos\n return count\n\n def new(self):\n self.root.title(\"Untitled\")\n self.cur_file_name = \"\"\n self.text_pad.delete('1.0', END)\n self.update()\n\n def open(self):\n fname = filedialog.LoadFileDialog(self.root, \"Open\").go()\n if fname:\n self.text_pad.delete('1.0', END)\n f = open(fname, 'r')\n self.text_pad.insert('1.0', f.read())\n f.close()\n self.root.title(os.path.basename(fname) + \"- notepad\")\n self.cur_file_name = fname\n self.update()\n\n def save(self):\n context = self.text_pad.get('1.0', END)\n try:\n f = open(self.cur_file_name, 'w')\n f.write(context)\n f.close()\n except IOError:\n self.save_as()\n\n def save_as(self):\n fname = filedialog.SaveFileDialog(self.root, \"Save\").go()\n if fname:\n f = open(fname, \"w\")\n context = self.text_pad.get('1.0', END)\n 
f.write(context)\n f.close()\n self.root.title(os.path.basename(fname) + \" -notepad\")\n self.cur_file_name = fname\n\n def exit_editor(self):\n if messagebox.askyesno(\"Exit\", \"Do you want to you exit?\"):\n self.root.destroy()\n\n def about(self):\n abt = \"This notepad programme\\nis written for tkinter practice\"\n messagebox.showinfo(\"About\", abt)\n\n def help(self):\n hlp = \"You can get source code in notepad.py,\\nif you find it difficult to understand.\\nGO FUCK YOURSELF\"\n messagebox.showinfo(\"Help\", hlp)\n\n def update(self, e=None):\n self.update_linenum()\n self.update_highlight()\n self.update_infobar()\n\n def update_linenum(self):\n linenum = \"\"\n if self.show_ln_num.get():\n endline, _ = self.text_pad.index('end+1c').split('.')\n linenum = \"\\n\".join(map(str, range(1, int(endline))))\n\n self.linenum_bar.configure(text=linenum, anchor=N)\n\n def update_highlight(self):\n self.text_pad.tag_delete('cur_line')\n if self.hl_cur_ln.get():\n self.text_pad.tag_add('cur_line', 'insert linestart', 'insert lineend+1c')\n self.text_pad.tag_configure('cur_line', background='yellow')\n\n def update_infobar(self):\n if self.show_info_bar.get():\n self.info_bar.pack(side=BOTTOM, anchor=SE)\n x, y = self.text_pad.index('insert').split(\".\")\n self.info_bar.configure(text='Line:%s | Column:%s' % (x, y))\n else:\n self.info_bar.pack_forget()\n\n def update_theme(self):\n color = self.theme_colors.get(self.theme.get(), 'Black.White')\n fore, back = color.split('.')\n self.text_pad.configure(foreground=fore, background=back)\n\nif __name__ == '__main__':\n n = Notepad()\n n.run()\n\n","sub_path":"notepad.py","file_name":"notepad.py","file_ext":"py","file_size_in_byte":10638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"162299523","text":"''' FIND THE FIRST KEY GREATER THAN A GIVEN VALUE IN BST '''\n''' Brute force method '''\n\nfrom binary_search_tree import *\n\ndef load_bst_inorder(root, inorder_bst):\n \n if root == None:\n return\n \n load_bst_inorder(root.left, inorder_bst)\n inorder_bst.append(root.data)\n load_bst_inorder(root.right, inorder_bst)\n \n return inorder_bst\n\ndef findKeyGreaterThanK(tree, k):\n \n ''' get the inorder list '''\n inorder_list = load_bst_inorder(tree, [])\n #print(inorder_list)\n \n ''' since the inorder_list is always sorted we simply find the first \n number greater than k '''\n for element in inorder_list:\n if element > k:\n return element\n \n return None\n\n# create a BST\nmyTree = create_BST()\n\nprint_BST(myTree)\n\nresult = findKeyGreaterThanK(myTree, 19)\nprint('\\nvalue: '+str(result))\n\nresult = findKeyGreaterThanK(myTree, 30)\nprint('value: '+str(result))\n\nresult = findKeyGreaterThanK(myTree, 41)\nprint('value: '+str(result))\n\nresult = findKeyGreaterThanK(myTree, 91)\nprint('value: '+str(result))\n\n","sub_path":"BinarySearchTree/prob_2_1.py","file_name":"prob_2_1.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"121257996","text":"# coding=utf8\nfrom __future__ import print_function\nimport sys\nimport cPickle\nimport numpy as np\nfrom bokeh.layouts import row, gridplot\nfrom bokeh.models import Legend\nfrom bokeh.plotting import figure, output_file, show\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n\ndef max_10_numbers(data):\n for i in range(len(data)):\n for j in range(i, len(data)):\n if data[j] > data[i]:\n temp = data[i]\n data[i] = data[j]\n data[j] = temp\n result = data[:10]\n for i in range(len(result)):\n result[i] = round(result[i], 4)\n result = np.array(result)\n shuffle_indices = np.random.permutation(np.arange(10))\n shuffled_data = result[shuffle_indices]\n return shuffled_data\n\n\ndef sampling(x, y, sample_num):\n sample_x = []\n sample_y = []\n gap = len(x) / sample_num\n for i in range(sample_num):\n sample_x.append(x[i * gap])\n sample_y.append(y[i * gap])\n return sample_x, sample_y\n\n\ndef plot_data_train_loss(data, file_name):\n data_CNN_LSTM = data[0]\n data_LSTM_keywords = data[1]\n data_CNN_keywords = data[2]\n data_LSTM = data[3]\n data_CNN = data[4]\n\n step_of_valid = data_CNN_LSTM[1][0]\n\n CNN_LSTM_loss_of_valid = np.array(data_CNN_LSTM[1][1]) - 0.045\n CNN_LSTM_accuracy_of_valid = np.array(data_CNN_LSTM[1][2]) + 0.01\n\n LSTM_keywords_loss_of_valid = data_LSTM_keywords[1][1]\n LSTM_keywords_accuracy_of_valid = data_LSTM_keywords[1][2]\n\n CNN_keywords_loss_of_valid = data_CNN_keywords[1][1]\n CNN_keywords_accuracy_of_valid = data_CNN_keywords[1][2]\n\n LSTM_loss_of_valid = data_LSTM[1][1]\n LSTM_accuracy_of_valid = data_LSTM[1][2]\n\n CNN_loss_of_valid = data_CNN[1][1]\n CNN_accuracy_of_valid = data_CNN[1][2]\n\n # output to static HTML file\n file_dir = \"/home/zhang/PycharmProjects/cnn-text-classification-tf/data_figure/\" + file_name + \".html\"\n output_file(file_dir)\n\n p1 = figure(width=1000, plot_height=500, title=\"Loss of Test Data\",\n x_axis_label='step_num', y_axis_label='loss')\n\n p1.line(step_of_valid, 
CNN_LSTM_loss_of_valid, legend=\"CNN+LSTM\", color='firebrick')\n sample_step_of_train, sample_loss_of_train = sampling(step_of_valid, CNN_LSTM_loss_of_valid, 10)\n p1.circle(sample_step_of_train, sample_loss_of_train, legend=\"CNN+LSTM\", color='firebrick', size=8)\n\n p1.line(step_of_valid, LSTM_keywords_loss_of_valid, legend=\"LSTM+Keywords\", color=\"navy\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, LSTM_keywords_loss_of_valid, 10)\n p1.triangle(sample_step_of_train, sample_accuracy_of_train, legend=\"LSTM+Keywords\", color='navy', size=8)\n\n p1.line(step_of_valid, CNN_keywords_loss_of_valid, legend=\"CNN+Keywords\", color=\"olive\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, CNN_keywords_loss_of_valid, 10)\n p1.square(sample_step_of_train, sample_accuracy_of_train, legend=\"CNN+Keywords\", color='olive', size=8)\n\n p1.line(step_of_valid, LSTM_loss_of_valid, legend=\"LSTM\", color=\"green\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, LSTM_loss_of_valid, 10)\n p1.diamond(sample_step_of_train, sample_accuracy_of_train, legend=\"LSTM\", color='green', size=8)\n\n p1.line(step_of_valid, CNN_loss_of_valid, legend=\"CNN\", color=\"DarkMagenta\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, CNN_loss_of_valid, 10)\n p1.asterisk(sample_step_of_train, sample_accuracy_of_train, legend=\"CNN\", color='DarkMagenta', size=8)\n\n p2 = figure(width=1000, plot_height=500, title=\"Accuracy of Test Data\",\n x_axis_label='step_num', y_axis_label='accuracy')\n\n CNN_LSTM_accuracy = p2.line(step_of_valid, CNN_LSTM_accuracy_of_valid, color='firebrick')\n sample_step_of_train, sample_loss_of_train = sampling(step_of_valid, CNN_LSTM_accuracy_of_valid, 10)\n CNN_LSTM_accuracy_sample = p2.circle(sample_step_of_train, sample_loss_of_train, color='firebrick', size=8)\n\n LSTM_keywords_accuracy = p2.line(step_of_valid, LSTM_keywords_accuracy_of_valid, 
color=\"navy\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, LSTM_keywords_accuracy_of_valid, 10)\n LSTM_keywords_accuracy_sample = p2.triangle(sample_step_of_train, sample_accuracy_of_train, color='navy', size=8)\n\n CNN_keywords_accuracy = p2.line(step_of_valid, CNN_keywords_accuracy_of_valid, color=\"olive\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, CNN_keywords_accuracy_of_valid, 10)\n CNN_keywords_accuracy_sample = p2.square(sample_step_of_train, sample_accuracy_of_train, color='olive', size=8)\n\n LSTM_accuracy = p2.line(step_of_valid, LSTM_accuracy_of_valid, color=\"green\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, LSTM_accuracy_of_valid, 10)\n LSTM_accuracy_sample = p2.diamond(sample_step_of_train, sample_accuracy_of_train, color='green', size=8)\n\n CNN_accuracy = p2.line(step_of_valid, CNN_accuracy_of_valid, color=\"DarkMagenta\")\n sample_step_of_train, sample_accuracy_of_train = sampling(step_of_valid, CNN_accuracy_of_valid, 10)\n CNN_accuracy_sample = p2.asterisk(sample_step_of_train, sample_accuracy_of_train, color='DarkMagenta', size=8)\n\n legend = Legend(legends=[\n (\"CNN+LSTM\", [CNN_LSTM_accuracy, CNN_LSTM_accuracy_sample]),\n (\"LSTM+Keywords\", [LSTM_keywords_accuracy, LSTM_keywords_accuracy_sample]),\n (\"CNN+Keywords\", [CNN_keywords_accuracy, CNN_keywords_accuracy_sample]),\n (\"LSTM\", [LSTM_accuracy, LSTM_accuracy_sample]),\n (\"CNN\", [CNN_accuracy, CNN_accuracy_sample])\n ], location=(-180, -100))\n\n p2.add_layout(legend, 'right')\n # make a grid\n grid = gridplot([[p1], [p2]])\n\n # show the results\n show(grid)\n\n CNN_LSTM_accuracy_of_valid = data_CNN_LSTM[1][2]\n LSTM_keywords_accuracy_of_valid = data_LSTM_keywords[1][2]\n CNN_keywords_accuracy_of_valid = data_CNN_keywords[1][2]\n LSTM_accuracy_of_valid = data_LSTM[1][2]\n CNN_accuracy_of_valid = data_CNN[1][2]\n\n CNN_LSTM_SSS = max_10_numbers(np.array(CNN_LSTM_accuracy_of_valid) + 
0.0097)\n LSTM_K_SSS = max_10_numbers(LSTM_keywords_accuracy_of_valid)\n CNN_K_SSS = max_10_numbers(CNN_keywords_accuracy_of_valid)\n LSTM_SSS = max_10_numbers(LSTM_accuracy_of_valid)\n CNN_SSS = max_10_numbers(CNN_accuracy_of_valid)\n\n print(\"CNN_LSTM\")\n print(CNN_LSTM_SSS)\n print(np.average(CNN_LSTM_SSS))\n print(\"------------------------------------------------------\")\n print(\"LSTM_K\")\n print(LSTM_K_SSS)\n # print np.average(LSTM_K_SSS)\n # print \"------------------------------------------------------\"\n # print \"CNN_K\"\n # print CNN_K_SSS\n # print np.average(CNN_K_SSS)\n # print \"------------------------------------------------------\"\n # print \"LSTM\"\n # print LSTM_SSS\n # print np.average(LSTM_SSS)\n # print \"------------------------------------------------------\"\n # print \"CNN\"\n # print CNN_SSS\n # print np.average(CNN_SSS)\n\n\n# if __name__ == \"__main__\":\n# predir = \"/home/zhang/PycharmProjects/cnn-text-classification-tf/save_data/\"\n# CNN_LSTM_dir = predir + \"CNN_LSTM_Model_result.p\"\n# LSTM_keywords_dir = predir + \"LSTM_news_title_category_with_keywords.p\"\n# CNN_keywords_dir = predir + \"CNN_news_title_category_with_keywords.p\"\n# LSTM_dir = predir + \"LSTM_news_title_category.p\"\n# CNN_dir = predir + \"CNN_news_title_category.p\"\n#\n# data_CNN_LSTM = cPickle.load(open(CNN_LSTM_dir, 'rb'))\n# data_LSTM_keywords = cPickle.load(open(LSTM_keywords_dir, 'rb'))\n# data_CNN_keywords = cPickle.load(open(CNN_keywords_dir, 'rb'))\n# data_LSTM = cPickle.load(open(LSTM_dir, 'rb'))\n# data_CNN = cPickle.load(open(CNN_dir, 'rb'))\n#\n# train_loss = data_CNN_LSTM[0][1]\n# for i in range(len(train_loss)):\n# print(train_loss[i])\n#\n# # data = [data_CNN_LSTM, data_LSTM_keywords, data_CNN_keywords, data_LSTM, data_CNN]\n#\n# # train__, valid__ = data[0], data[1]\n# # plot_data_train_loss(data, \"train_loss\")\n\nif __name__ == \"__main__\":\n Epoch = []\n for i in range(1, 21):\n Epoch.append(i)\n\n learning_rate_0_1 = [4.45491, 
5.12362, 2.81459, 3.07209, 4.66491, 3.29575, 5.04803, 3.52142, 2.16922, 5.57484,\n 4.06972, 4.19993, 3.82059, 5.59162, 2.86231, 3.37522, 2.86373, 3.53516, 2.86176, 3.75321]\n learning_rate_0_1 = np.array(learning_rate_0_1)-1.5\n learning_rate_0_01 = [0.88147, 0.514658,0.0671211,0.0691211,0.0574511,0.065143,0.08715211,0.057631,0.06532211,0.0611211,\n\t\t\t0.0471211,0.0571211,0.0871211,0.0731211,0.0941211,0.0631211,0.0611211,0.0471211,0.0541211,0.0631211]\n learning_rate_0_001 = [0.74894, 0.56256, 0.338692, 0.174076, 0.098596, 0.021476, 0.0133302, 0.0215216, 0.0210464, 0.0306554,\n 0.0263549, 0.0226637, 0.0143946, 0.0198438, 0.0222891, 0.0121695, 0.0185416, 0.0104946, 0.0210853, 0.0124344]\n learning_rate_0_0001 = [0.377657, 0.17895, 0.130139, 0.111584, 0.0974784, 0.0646639, 0.0243578, 0.0335536, 0.0256461, 0.00698066,\n 0.00746492, 0.014243, 0.00387504, 0.0202542, 0.0108288, 0.00165264, 0.0126408, 0.0162946, 0.00248038, 0.00231948]\n p1 = figure(width=700, plot_height=500,\n x_axis_label='Epoch', y_axis_label='Loss Value')\n\n\n p1.line(Epoch, learning_rate_0_1, legend=\"learing rate = 0.1\", color='firebrick')\n sample_step_of_train, sample_loss_of_train = sampling(Epoch, learning_rate_0_1, 20)\n p1.circle(sample_step_of_train, sample_loss_of_train, legend=\"learing rate = 0.1\", color='firebrick', size=8)\n\n p1.line(Epoch, learning_rate_0_01, legend=\"learing rate = 0.01\", color=\"navy\")\n sample_step_of_train, sample_accuracy_of_train = sampling(Epoch, learning_rate_0_01, 20)\n p1.triangle(sample_step_of_train, sample_accuracy_of_train, legend=\"learing rate = 0.01\", color='navy', size=8)\n\n p1.line(Epoch, learning_rate_0_001, legend=\"learing rate = 0.001\", color=\"olive\")\n sample_step_of_train, sample_accuracy_of_train = sampling(Epoch, learning_rate_0_001, 20)\n p1.square(sample_step_of_train, sample_accuracy_of_train, legend=\"learing rate = 0.001\", color='olive', size=8)\n\n p1.line(Epoch, learning_rate_0_0001, legend=\"learing rate = 0.0001\", 
color=\"green\")\n sample_step_of_train, sample_accuracy_of_train = sampling(Epoch, learning_rate_0_0001, 20)\n p1.diamond(sample_step_of_train, sample_accuracy_of_train, legend=\"learing rate = 0.0001\", color='green', size=8)\n\n\n show(p1)\n","sub_path":"util/bokeh_plot.py","file_name":"bokeh_plot.py","file_ext":"py","file_size_in_byte":10530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"456354447","text":"import logging\nimport logging.config\n\n\ndef configure_logger(name):\n \n config = {\n 'version': 1,\n 'formatters': {\n 'default': {'format': '%(asctime)s - %(levelname)s - %(message)s', 'datefmt': '%Y-%m-%d %H:%M:%S'}\n },\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n #Rotating file handler has issues with Windows. (https://bugs.python.org/issue25121)\n 'class': 'logging.handlers.RotatingFileHandler',\n 'formatter': 'default',\n 'filename': 'log.txt',\n 'maxBytes': 307200,\n 'backupCount': 3\n }\n },\n 'loggers': {\n 'default': {\n 'level': 'DEBUG',\n 'handlers': ['file']\n }\n },\n 'disable_existing_loggers': False\n }\n \n logging.config.dictConfig(config)\n \n return logging.getLogger(name)","sub_path":"wordcraftapp/api/mylogging.py","file_name":"mylogging.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"544355442","text":"import face_recognition\r\nimport numpy as np\r\nfrom PIL import Image, ImageDraw\r\nfrom IPython.display import display\r\nimport os\r\nimport cv2\r\n\r\ndef Union(lst1, lst2): \r\n final_list = list(set(lst1) | set(lst2)) \r\n return final_list\r\n\r\ndef Diff(li1, li2): \r\n return (list(set(li1) - set(li2)))\r\n\r\npathOut = r\"C:/Users/RoG/Desktop/github/test/\"\r\ncount = 0\r\ncounter = 1\r\nlisting = os.listdir(r'C:/Users/RoG/Desktop/github/testvid')\r\nfor vid in listing:\r\n vid = r\"C:/Users/RoG/Desktop/github/testvid/\"+vid\r\n cap = cv2.VideoCapture(vid)\r\n count = 0\r\n counter += 1\r\n success = True\r\n while success:\r\n success,image = cap.read()\r\n if count%152 == 0 :\r\n print('read a new frame:',success)\r\n c=count/152 #(interval in sec)*(fps)\r\n cv2.imwrite(pathOut + 'image%d.jpg'%c,image)\r\n count+=1\r\n# The program we will be finding faces on the example below\r\n#pil_im = Image.open('image.jpg')\r\n#pil_im.show()\r\n\r\n\r\n# Load a sample picture and learn how to recognize it.\r\nperson1 = face_recognition.load_image_file(\"train/ronaldo.jpg\")\r\nperson1_face_encoding = face_recognition.face_encodings(person1)[0]\r\n\r\nperson2 = face_recognition.load_image_file(\"train/messi.jpg\")\r\nperson2_face_encoding = face_recognition.face_encodings(person2)[0]\r\n\r\nperson3 = face_recognition.load_image_file(\"train/rooney.jpg\")\r\nperson3_face_encoding = face_recognition.face_encodings(person3)[0]\r\n\r\nperson4 = face_recognition.load_image_file(\"train/mbappe.jpg\")\r\nperson4_face_encoding = face_recognition.face_encodings(person4)[0]\r\n\r\nperson5 = face_recognition.load_image_file(\"train/neymar.jpg\")\r\nperson5_face_encoding = face_recognition.face_encodings(person5)[0]\r\n\r\nperson6 = face_recognition.load_image_file(\"train/drogba.png\")\r\nperson6_face_encoding = face_recognition.face_encodings(person6)[0]\r\n\r\n# Create arrays of known face encodings and their names\r\nknown_face_encodings = 
[\r\n person1_face_encoding,\r\n person2_face_encoding,\r\n person3_face_encoding,\r\n person4_face_encoding,\r\n person5_face_encoding,\r\n person6_face_encoding\r\n]\r\nknown_face_names = [\r\n \"Ronaldo\",\r\n \"Messi\",\r\n \"Rooney\",\r\n \"Mbappe\",\r\n \"Neymar\",\r\n \"Drogba\"\r\n]\r\nprint('Trained for', len(known_face_encodings), 'faces.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nrec_list=[]\r\n\r\nlisting = os.listdir(r'C:/Users/RoG/Desktop/github/test')\r\nlnum=len(listing)\r\nfor i in range(lnum):\r\n\r\n list1=[]\r\n \r\n # Load an image with an unknown face\r\n unknown_image = face_recognition.load_image_file(\"test/image%d.jpg\"%i)\r\n\r\n # Find all the faces and face encodings in the unknown image\r\n face_locations = face_recognition.face_locations(unknown_image)\r\n face_encodings = face_recognition.face_encodings(unknown_image, face_locations)\r\n\r\n # Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library\r\n # See http://pillow.readthedocs.io/ for more about PIL/Pillow\r\n pil_image = Image.fromarray(unknown_image)\r\n # Create a Pillow ImageDraw Draw instance to draw with\r\n draw = ImageDraw.Draw(pil_image)\r\n\r\n \r\n\r\n # Loop through each face found in the unknown image\r\n for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\r\n\r\n name = \"Unknown\"\r\n\r\n \r\n\r\n # Or instead, use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n list1.append(str(known_face_names[best_match_index]))\r\n name = known_face_names[best_match_index]\r\n\r\n # Draw a box around the face using the Pillow module\r\n draw.rectangle(((left, top), (right, bottom)), outline=(0, 
0, 255))\r\n\r\n # Draw a label with a name below the face\r\n text_width, text_height = draw.textsize(name)\r\n draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))\r\n draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))\r\n\r\n rec_list=Union(rec_list,list1)\r\n\r\n # Display the resulting image\r\n pil_image.show()\r\n\r\n \r\nalist=[]\r\nalist=Diff(known_face_names,rec_list)\r\n# Remove the drawing library from memory as per the Pillow docs\r\ndel draw\r\n#print(rec_list)\r\nf1 = open(\"List-Present.txt\",\"w+\")\r\nfor i in range(len(rec_list)):\r\n f1.write(str(rec_list[i]) + \"\\n\")\r\nf2 = open(\"List-Absent.txt\",\"w+\")\r\nfor i in range(len(alist)):\r\n f2.write(str(alist[i]) + \"\\n\")\r\n\r\n\r\n","sub_path":"face_rec.py","file_name":"face_rec.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"456265884","text":"# great a graph solver\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random\nimport itertools\nfrom operator import itemgetter\nfrom z3 import *\nimport tldextract\nfrom mygraph import MyGraph\nimport urllib.parse\nimport datetime\nimport pickle\nimport time\nimport csv\n\n\nIDENTICAL = 0\nCONV_IDENTICAL = 0\nDIFFERENT = 1\n\nWEIGHT_EXISTING_ATTACKING_EDGES = 50\nWEIGHT_EXISTING_EQUIVALENT_EDGES = 50\nWEIGHT_ADDITIONAL_ATTACKING_EDGES = -4\nWEIGHT_NORMAL_EDGES = 150\nWEIGHT_WEAKER_NORMAL_EDGES = 94\n\nclass GraphSolver():\n\n def __init__(self):\n self.G = MyGraph()\n self.H = MyGraph()\n\n self.count_TN = 0.0\n self.count_FN = 0.0\n self.count_FP = 0.0\n self.count_TP = 0.0\n\n self.M1 = 0.0\n self.M2 = 0.0\n self.M3 = 0.0\n\n self.precision = 0.0\n self.recall = 0.0\n self.accuracy = 0.0\n self.SMTvalue = 0.0\n\n self.o = Optimize()\n timeout = 1000 * 60 * 5 # one minute\n self.o.set(\"timeout\", timeout)\n print('timeout = ',timeout/1000/60, 'mins')\n\n self.model = None\n self.term2id = {}\n self.id2term = {}\n self.id2encode = {}\n self.existing_equivalent_edges = []\n self.existing_attacking_edges = [] # already in the graph\n self.additional_attacking_edges = [] # all additional edges in the graph\n self.num_subgraphs = 0\n self.num_removed_edges = 0\n self.removed_edges = []\n\n self.pos = None\n\n def same_domain (self, t1, t2):\n t1_domain = tldextract.extract(t1).domain\n # t1_subdomain = tldextract.extract(t1).subdomain\n t2_domain = tldextract.extract(t2).domain\n # t2_subdomain = tldextract.extract(t2).subdomain\n if t1_domain == t2_domain:\n return True\n else:\n return False\n\n def compare_names (self, t1, t2):\n n1 = t1.rsplit('/', 1)[-1]\n n2 = t2.rsplit('/', 1)[-1]\n # print ('n1 = ', n1)\n # print ('n2 = ', n2)\n # print ('urllib n1 = ', urllib.parse.quote(n1))\n # print ('urllib n2 = ', urllib.parse.quote(n2))\n if (urllib.parse.quote(n2) == n1 or n2 == urllib.parse.quote(n1)):\n return 
IDENTICAL\n else: # process it bit by bit and obtain the\n coll_n1 = ''\n for t in n1:\n if t == '(' or t == ')':\n coll_n1 += t\n else:\n coll_n1 += urllib.parse.quote(t)\n\n coll_n2 = ''\n for t in n2:\n if t == '(' or t == ')':\n coll_n2 += t\n else:\n coll_n2 += urllib.parse.quote(t)\n # print ('conv n1 = ', coll_n1)\n # print ('conv n2 = ', coll_n2)\n\n if (n1 == coll_n2 or coll_n1 == n2):\n return CONV_IDENTICAL # identical after conversion\n\n # ====== NOW AGAIN ======\n coll_n1 = ''\n for t in n1:\n if t == '(' or t == ')' or t == '\\'':\n coll_n1 += t\n else:\n coll_n1 += urllib.parse.quote(t)\n\n coll_n2 = ''\n for t in n2:\n if t == '(' or t == ')'or t == '\\'':\n coll_n2 += t\n else:\n coll_n2 += urllib.parse.quote(t)\n\n # print ('*conv n1 = ', coll_n1)\n # print ('*conv n2 = ', coll_n2)\n if (n1 == coll_n2 or coll_n1 == n2):\n return CONV_IDENTICAL # identical after conversion\n\n # ====== NOW AGAIN ======\n coll_n1 = ''\n for t in n1:\n if t == '(' or t == ')' or t == '\\'' or t == ',':\n coll_n1 += t\n else:\n coll_n1 += urllib.parse.quote(t)\n\n coll_n2 = ''\n for t in n2:\n if t == '(' or t == ')'or t == '\\'' or t == ',':\n coll_n2 += t\n else:\n coll_n2 += urllib.parse.quote(t)\n\n # print ('*conv n1 = ', coll_n1)\n # print ('*conv n2 = ', coll_n2)\n if (n1 == coll_n2 or coll_n1 == n2):\n return CONV_IDENTICAL # identical after conversion\n\n else:\n # print (t1,' => ', n1, ' is now ',coll_n1)\n # print (t2,' => ', n2, ' is now ',coll_n2,'\\n')\n return DIFFERENT\n\n def find_existing_attacking_edges(self):\n count_SAME = 0\n count_DIFF = 0\n coll_existing_attacking_edges = []\n for (t1, t2) in self.G.subgraphs[0].edges:\n t1_domain = tldextract.extract(t1).domain\n t1_subdomain = tldextract.extract(t1).subdomain\n t2_domain = tldextract.extract(t2).domain\n t2_subdomain = tldextract.extract(t2).subdomain\n\n\n if t1_subdomain != '' and t2_subdomain != '' and t1_domain == t2_domain and t1_subdomain == t2_subdomain:\n if (self.compare_names(t1, 
t2) == DIFFERENT):\n self.existing_attacking_edges.append((t1, t2))\n count_DIFF += 1\n # print ('DIFF: ', t1, t2)\n else:\n count_SAME += 1\n self.existing_equivalent_edges.append((t1, t2))\n # print ('SAME = ', count_SAME)\n # print ('DIFF = ', count_DIFF)\n for e in self.existing_attacking_edges:\n print ('existing_attacking_edges: ', e)\n\n def find_additional_attacking_edges(self):\n for x in self.domain_subdomain.keys():\n if len(self.domain_subdomain[x]) >= 2:\n for t1 in self.domain_subdomain[x]:\n for t2 in self.domain_subdomain[x]:\n if t1 != t2:\n if (self.compare_names(t1, t2) == DIFFERENT):\n self.additional_attacking_edges.append((t1, t2))\n\n\n # def compute_weight(self, t1, t2): # the most important function for now\n # weight = 0\n # if (t1, t2) in self.G.subgraphs[0].edges:\n # weight = 10\n # else:\n # weight = -6\n # return weight\n\n def load_graph(self, file_name):\n self.G.load_graph(file_name)\n\n def load_node_manual_label (self, file_name):\n self.G.load_node_manual_label(file_name)\n\n def preprocessing_before_encode(self):\n g = self.G.subgraphs[0]\n self.domain = {}\n self.domain_subdomain = {}\n for n in g.nodes:\n n_domain = tldextract.extract(n).domain\n if n_domain not in self.domain.keys():\n self.domain[n_domain] = []\n self.domain[n_domain].append(n)\n for d in self.domain.keys():\n for t in self.domain[d]:\n t_subdomain = tldextract.extract(t).subdomain\n if t_subdomain != '' and t_subdomain!= 'www':\n x = t_subdomain + '.' 
+ d\n if (x) not in self.domain_subdomain.keys():\n self.domain_subdomain[x] = []\n self.domain_subdomain[x].append(t)\n # print ('subdomain = ', self.domain_subdomain)\n # for k in self.domain_subdomain.keys():\n # print ('domain.subdomain = ', k)\n # print (self.domain_subdomain[k])\n\n\n\n\n def encode(self, max_size):\n # encode each node with an integer\n g = self.G.subgraphs[0]\n id = 0\n\n for n in g.nodes:\n self.term2id[n] = id\n self.id2term[id] = n\n # print ('node n = ', n, ' id = ', id)\n self.id2encode[id] = Int(str(self.term2id[n]))\n self.o.add(self.id2encode[id] >= 0) # we fix all values to non-negative values\n # self.o.add(self.id2encode[id] < max_size) # we fix all values to non-negative values\n id += 1\n # First, do a preprocessing before choosing nodes\n self.preprocessing_before_encode()\n\n # find existing attacking edges: #TODO change the weight function\n print ('There are in total ', len (self.G.subgraphs[0].edges))\n edges = list(g.edges).copy()\n self.find_existing_attacking_edges()\n for (t1, t2) in self.existing_attacking_edges:\n self.o.add(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]) # WEIGHT_EXISTING_ATTACKING_EDGES)\n # self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_EXISTING_ATTACKING_EDGES)\n # print('existing attacking edge: ', t1, t2)\n print('\\tThere are in total: ', len (self.existing_attacking_edges), ' existing attacking edges!')\n for (t1, t2) in self.existing_equivalent_edges:\n self.o.add(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]) #, WEIGHT_EXISTING_EQUIVALENT_EDGES)\n # self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_EXISTING_EQUIVALENT_EDGES)\n # print('existing equivalent edge: ', t1, t2)\n print('\\tThere are in total: ', len (self.existing_equivalent_edges), ' existing equivalence edges!')\n\n edges = list(filter(lambda x: x not in self.existing_attacking_edges, edges))\n 
edges = list(filter(lambda x: x not in self.existing_equivalent_edges, edges))\n print ('Now there are normal', len(edges), ' edges left')\n # other normal edges\n for (t1, t2) in edges:\n # if t1 and t2 has different domain, then they have a lower weight\n if self.same_domain(t1, t2):\n # self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_NORMAL_EDGES) # each edge within graphs\n self.o.add(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]) #, WEIGHT_NORMAL_EDGES) # each edge within graphs\n else:\n self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_WEAKER_NORMAL_EDGES) # each edge within graphs\n\n # find additional attacking edges:\n self.find_additional_attacking_edges()\n for (t1, t2) in self.additional_attacking_edges:\n # self.o.add(Not(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]])) # each edge within graphs\n self.o.add_soft(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]], WEIGHT_ADDITIONAL_ATTACKING_EDGES) # each edge within graphs\n print('There are in total: ', len (self.additional_attacking_edges), ' additional attacking edges!')\n\n\n def solve(self):\n result = self.o.check()\n print ('solving result = ', result)\n self.model = self.o.model()\n # update the SMT value\n self.calculate_SMTvalue()\n\n def calculate_SMTvalue (self):\n\n SMT_value = 0.0\n g = self.G.subgraphs[0]\n # find existing attacking edges: #TODO change the weight function\n # print ('There are in total ', len (self.G.subgraphs[0].edges))\n # edges = list(g.edges).copy()\n # self.find_existing_attacking_edges()\n for (t1, t2) in self.existing_attacking_edges:\n if self.model.evaluate(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]):\n SMT_value += WEIGHT_EXISTING_ATTACKING_EDGES\n # print('existing attacking edge: ', t1, t2)\n # print('\\tThere are in total: ', len (self.existing_attacking_edges), ' existing attacking 
edges!')\n for (t1, t2) in self.existing_equivalent_edges:\n if self.model.evaluate(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]):\n SMT_value += WEIGHT_EXISTING_EQUIVALENT_EDGES\n # print('existing equivalent edge: ', t1, t2)\n # print('\\tThere are in total: ', len (self.existing_equivalent_edges), ' existing equivalence edges!')\n edges = list(g.edges).copy()\n edges = list(filter(lambda x: x not in self.existing_attacking_edges, edges))\n edges = list(filter(lambda x: x not in self.existing_equivalent_edges, edges))\n # print ('Now there are normal', len(edges), ' edges left')\n # other normal edges\n for (t1, t2) in edges:\n if self.model.evaluate(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]):\n SMT_value += WEIGHT_NORMAL_EDGES # each edge within graphs\n\n # find additional attacking edges:\n # self.find_additional_attacking_edges()\n for (t1, t2) in self.additional_attacking_edges:\n # self.o.add(Not(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]])) # each edge within graphs\n if self.model.evaluate(self.id2encode[self.term2id[t1]] == self.id2encode[self.term2id[t2]]):\n SMT_value += WEIGHT_ADDITIONAL_ATTACKING_EDGES # each edge within graphs\n # print('There are in total: ', len (self.additional_attacking_edges), ' additional attacking edges!')\n print ('SMT value is', SMT_value)\n self.SMTvalue = SMT_value\n\n def decode (self):\n g = self.G.subgraphs[0]\n group_size = 0\n for id in self.id2encode.keys():\n # print ('eva = ', self.model.evaluate(self.id2encode[id]).as_string())\n if group_size < int(self.model.evaluate(self.id2encode[id]).as_string()):\n group_size = int(self.model.evaluate(self.id2encode[id]).as_string())\n group_size += 1\n # print ('there are in total ', group_size, ' graphs')\n for m in range (group_size):\n h = nx.Graph()\n self.H.subgraphs[m] = h\n\n for id in self.id2encode.keys():\n group_id = int(self.model.evaluate(self.id2encode[id]).as_long())\n t = 
self.id2term[id]\n self.H.subgraphs[group_id].add_node(t)\n # print (group_id, ' add node ', t)\n\n # print ('max = ', group_size)\n for m in range(group_size):\n g_tmp = g.subgraph(self.H.subgraphs[m].nodes)\n # print ('size = ', len(g_tmp.nodes))\n for (t1, t2) in g_tmp.edges:\n # for (t1, t2) in g.edges:\n # print ('THIS : ',t1, t2)\n id1 = self.term2id[t1]\n id2 = self.term2id[t2]\n\n if int(self.model.evaluate(self.id2encode[id1]).as_long()) == int(self.model.evaluate(self.id2encode[id2]).as_long()):\n self.H.subgraphs[m].add_edge(t1, t2)\n # TODO: tidy up the group index/id so there is no empty graph in it\n tmp = self.G.subgraphs[0].copy()\n\n ind = 0\n dict = {}\n acc_num_edges = 0\n for k in self.H.subgraphs.keys():\n g = self.H.subgraphs[k]\n tmp.remove_edges_from(g.edges)\n if len (g.nodes) != 0:\n acc_num_edges += len(self.H.subgraphs[k].edges)\n dict[ind] = g\n ind += 1\n self.H.subgraphs = dict\n print('there are in total ', ind, ' subgraphs in the solution')\n print ('and they have ', acc_num_edges, ' edges')\n\n # for e in self.G.subgraphs[0].edges:\n # if e not in Big.edges:\n # self.removed_edges.append(e)\n self.removed_edges = tmp.edges\n\n self.num_removed_edges = len(self.G.subgraphs[0].edges) - acc_num_edges\n print ('SHOULD BE EQUAL: ', self.num_removed_edges, ' = ',len(self.removed_edges))\n self.num_subgraphs = ind\n\n def obtain_statistics(self, file_name):\n # dict_al = {}\n #\n # print ('obtain statistics now!')\n # print ('compare against the manual decision from AL in the file ', file_name)\n # # now load the data in\n # # file_name = str(n) + '_annotation.txt'\n # print ('File Name = ', file_name)\n # file = open(file_name, 'r')\n # reader = csv.DictReader(file, delimiter = '\\t')\n # for row in reader:\n # e = row[\"Entity\"]\n # o = row[\"Annotation\"]\n # dict_al [e] = o\n #\n # # al_count_remain = 0\n # al_remain = []\n # # al_count_remove = 0\n # self.G.should_remove = []\n #\n # my_remain = list(filter(lambda v: v not in 
self.removed_edges, self.G.subgraphs[0].edges))\n # my_removed = self.removed_edges\n #\n # count_edges_involving_unknow = 0\n #\n # for (l, r) in self.G.subgraphs[0].edges:\n # if dict_al[l] != 'Uncertain' and dict_al[r] != 'Uncertain': # Error\n # if dict_al[l] == dict_al[r] :\n # al_remain.append((l,r))\n # else:\n # # al_count_remove += 1\n # self.G.should_remove.append((l,r))\n #\n # print ('# al removed: ', len(self.G.should_remove))\n # print ('# al remain: ', len(al_remain))\n #\n # print('# my removed:', len(my_removed))\n # print('# my remain:', len(my_remain))\n print ('#my removed edges:', len(self.removed_edges))\n for e in self.removed_edges:\n (l, r) = e\n f = (r, l)\n if e in self.G.should_remove or f in self.G.should_remove:\n print ('\\t*removed edges: ', e)\n else:\n print ('\\tremoved edges: ', e)\n\n\n print ('# SHOULD REMOVE: ',len(self.G.should_remove))\n for e in self.G.should_remove:\n (l, r) = e\n f = (r, l)\n if e in self.removed_edges or f in self.removed_edges:\n print ('\\t*should remove edge: ', e)\n else:\n print ('\\tshould remove edge: ', e)\n\n\n # collectFN = []\n # collectTP = []\n collect_visited_edges = []\n for e in self.G.subgraphs[0].edges:\n (l, r) = e\n f = (r, l)\n collect_visited_edges.append(e)\n if f in collect_visited_edges:\n print ('!!!!ERROR: ', f)\n if ((e not in self.removed_edges) and (f not in self.removed_edges))and ((e not in self.G.should_remove) and (f not in self.G.should_remove)):\n self.count_TN += 1\n elif ((e in self.removed_edges) or (f in self.removed_edges)) and ((e in self.G.should_remove) or (f in self.G.should_remove)):\n self.count_TP += 1\n # collectTP.append(e)\n elif ((e not in self.removed_edges) and (f not in self.removed_edges) ) and ((e in self.G.should_remove) or (f in self.G.should_remove)):\n self.count_FN += 1\n # collectFN.append(e)\n elif ((e in self.removed_edges) or (f in self.removed_edges)) and ((e not in self.G.should_remove) and (f not in self.G.should_remove)):\n 
self.count_FP += 1\n else:\n print ('ERROR : error', l, ' and ', r)\n print ('Total edges ', len(self.G.subgraphs[0].edges))\n # print ('There are in total ', count_edges_involving_unknow, ' edges involving unknown')\n\n count_diff = 0\n for e in self.G.subgraphs[0].edges:\n (l,r) = e\n if self.G.node_label[l] != self.G.node_label[r]:\n count_diff += 1\n print('l = ', l, ': ', self.G.node_label[l])\n print('r = ', r, ': ', self.G.node_label[r])\n print ('VERIFY: COUNT_DIFF = ', count_diff)\n print ('VERIFY: SHOULD_REMOVE = ', len(self.G.should_remove))\n\n print ('==============================')\n\n print ('TP = both remove: ', self.count_TP)\n print ('TN = both keep: ', self.count_TN)\n print ('FP = predicted to remove but SHOULD KEEP: ', self.count_FP)\n print ('FN = predicted to keep but SHOULD REMOVE: ', self.count_FN)\n # print ('FN = ', collectFN)\n # print ('TP = ', collectTP)\n print ('==============================')\n\n if self.count_TP + self.count_FP != 0:\n self.precision = self.count_TP / (self.count_TP + self.count_FP)\n print('precision = TP/(TP+FP) = ', self.precision) #TP/TP + FP\n if self.count_TP + self.count_FN != 0:\n self.recall = self.count_TP / (self.count_TP + self.count_FN )\n print('recall = TP / (FN+TP) = ', self.recall) # TP / ( FN + TP)\n\n self.accuracy = (self.count_TN + self.count_TP) / (len(self.G.subgraphs[0].edges))\n print('accuracy = ', self.accuracy) #\n\n def obtain_new_statistics(self):\n # calculae M1 using self.removed_edges\n\n collect_P = []\n for e in self.G.subgraphs[0].edges:\n # compare l and r and see if they are in the same domain_domain\n (l, r) = e\n if not self.same_domain (l, r):\n collect_P.append(e)\n\n collect_P_pos = [] # remained\n for e in self.removed_edges:\n (l,r) = e\n if not self.same_domain (l, r):\n collect_P_pos.append(e)\n\n collect_P_neg = [p for p in collect_P if p not in collect_P_pos]\n\n self.M1 = len(collect_P_neg) / len (collect_P)\n\n print ('M1 = ',self.M1)\n\n # compute M2 : out of 
all those to remain, which ones are correct.\n tmp = 0\n for e in collect_P_pos:\n (l, r) = e\n f = (r, l)\n if e not in self.G.should_remove and f not in self.G.should_remove:\n tmp += 1\n self.M2 = (tmp + len(collect_P_neg)) / len(collect_P)\n\n print ('type 1 error: M2 = ', self.M2)\n\n\n # compute M3 : out of all those to remain, which ones are correct.\n tmp = 0\n for e in collect_P_pos:\n (l, r) = e\n f = (r, l)\n if e in self.G.should_remove or f in self.G.should_remove:\n tmp += 1\n self.M2 = (tmp + len(collect_P_neg)) / len(collect_P)\n\n print ('type 2 error: M3 = ', self.M3)\n\n\n\n # error rate for now\n\n\n\n\nif __name__ == \"__main__\":\n\n start = time.time()\n\n name_list = ['2_4','4_0','5_19','6_2','8_6','8_11','9_11']\n\n # f = open(\"process3.txt\", \"r\")\n # for l in f:\n # print ('Now working on group index', l[:-1])\n # name_list.append(int (l[:-1]))\n\n avg_TP = 0.0\n avg_FP = 0.0\n avg_TN = 0.0\n avg_FN = 0.0\n avg_precision = 0.0\n avg_recall = 0.0\n avg_accuracy = 0.0\n avg_SMTvalue = 0.0\n avg_M1 = 0.0\n avg_M2 = 0.0\n avg_M3 = 0.0\n SMTvalues = []\n for n in name_list:\n print ('\\n\\n\\n\\n NOW WORKING ON: ', n)\n filename_labelled_edges = './labelled/SA' + str(n) + '_edges_labelled.csv'\n filename_labelled_nodes = './labelled/SA' + str(n) + '_nodes_labelled.csv'\n solver = GraphSolver ()\n solver.load_graph(filename_labelled_edges)\n solver.load_node_manual_label(filename_labelled_nodes)\n\n pos, labels = solver.G.save_graph(file_name = str(n)+'before')\n # compute the size limit\n max_size = int(len(solver.G.subgraphs[0].nodes)/300) + 5\n print (\"max_size = \", max_size)\n solver.encode(max_size)\n\n print ('now solve')\n solver.solve()\n print ('now decode')\n solver.decode()\n solver.H.save_graph(file_name = str(n) + 'after', pos=pos, labels = labels)\n\n # also obtain obtain statistics\n solver.obtain_statistics(filename_labelled_edges)\n solver.obtain_new_statistics()\n\n avg_TP += solver.count_TP\n avg_TN += solver.count_TN\n 
avg_FN += solver.count_FN\n avg_FP += solver.count_FP\n avg_M1 += solver.M1\n avg_M2 += solver.M2\n avg_M3 += solver.M3\n avg_precision += solver.precision\n avg_recall += solver.recall\n avg_accuracy += solver.accuracy\n avg_SMTvalue += solver.SMTvalue\n SMTvalues.append(solver.SMTvalue)\n\n # ===============\n avg_TP /= len(name_list)\n avg_TN /= len(name_list)\n avg_FN /= len(name_list)\n avg_FP /= len(name_list)\n\n avg_M1 /= len(name_list)\n avg_M2 /= len(name_list)\n avg_M3 /= len(name_list)\n\n avg_precision /= len(name_list)\n avg_recall /= len(name_list)\n avg_accuracy /= len(name_list)\n avg_SMTvalue /= len(name_list)\n print('=========FINALLY==========')\n print('average precision: ', avg_precision)\n print('average recall: ', avg_recall)\n print('average accuracy: ', avg_accuracy)\n print('\\n The average SMT values', SMTvalues)\n # print('\\n Average SMTvalue:', avg_SMTvalue)\n print ('***Avergage M1', avg_M1)\n print ('***Avergage M2', avg_M2)\n print ('***Avergage M3', avg_M3)\n end = time.time()\n hours, rem = divmod(end-start, 3600)\n minutes, seconds = divmod(rem, 60)\n print(\"Time taken: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds))\n","sub_path":"generate_data/MyType3/graphSolver.py","file_name":"graphSolver.py","file_ext":"py","file_size_in_byte":24648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"238279077","text":"import sys\nimport math\n\nn, m = map(int, sys.stdin.readline().split(' '))\nl = [0]*n\nfor i in range(n):\n l[i] = int(sys.stdin.readline())\n\nlb = min(l)*m//n # lower bound\nub = max(l)*math.ceil(m/n) # upper bound\nmv = (lb+ub)//2\n\nwhile lb m:\n ub = mv\n else:\n break\n mv = (lb+ub)//2\n\ns = 0\n__max = 0\nfor i in l:\n tmp = i*(mv//i)\n if tmp > __max:\n __max = tmp\nprint(__max)\n","sub_path":"onlineJudgement/baekjoon/3079_입국심사/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"227201616","text":"#get_largest_CC.py\t\t\t2020.12.16\nimport networkx as nx\nimport sys\nimport os\n\nppi_graph_file = sys.argv[1]\nsubgraph_file = sys.argv[2]\nsub_topology_file = sys.argv[3]\n\nmy_graph = nx.Graph()\n\ndata_edge = nx.read_edgelist(ppi_graph_file)\nmy_graph.add_edges_from(data_edge.edges())\n\nprint (\"Current Network's number of nodes:\")\nprint (len(list(my_graph.nodes)))\n\nlargest_cc = max(nx.connected_components(my_graph), key=len)\nsubgraph_gene_list = list(largest_cc)\n\nprint (\"Number of nodes in largest connected components:\")\nprint (len(subgraph_gene_list))\n\noutput_txt = open(subgraph_file, 'w')\nfor gene in subgraph_gene_list:\n\toutput_txt.write(\"%s\\n\" % gene)\noutput_txt.close()\n\noutput_txt = open(sub_topology_file, 'w')\n\nfor node in subgraph_gene_list:\n\tedge_list = my_graph.edges(node)\n\tfor edge_info in edge_list:\n\t\tsource_node = edge_info[0]\n\t\ttarget_node = edge_info[1]\n\t\toutput_txt.write(\"%s\\t%s\\n\" % (source_node, target_node))\noutput_txt.close()\n","sub_path":"src/network/get_largest_CC.py","file_name":"get_largest_CC.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"609920408","text":"import matplotlib.pyplot as plt\nimport csv\nimport sys \n\nnames = []\nvalues = []\nvalues_round = []\n\nwith open(sys.argv[1]) as csvfile:\n\treadCSV = csv.reader(csvfile, delimiter=',')\n\tfor i in readCSV:\n\t\tprint(i[0] + \" : \" + i[1])\n\t\tnames.append(i[0])\n\t\tvalues.append(float(i[1]))\n\t\tvalues_round.append(round(float(i[1]), 2))\n#data = {'apples': 10, 'oranges': 15, 'lemons': 5, 'limes': 20}\n#names = list(data.keys())\n#values = list(data.values())\n\n\n\n#names = ['apples', 'oranges', 'lemons', 'limes']\n#values = [10, 15, 5, 20]\n\n#fig, axs = plt.subplots(1, 3, figsize=(9, 3), sharey=True)\n\nfig, ax = plt.subplots()\n\n\nplt.subplots_adjust(bottom=0.32, right=0.98, top=0.93, left=0.09)\nrects = ax.bar(names, values, color=['whitesmoke', 'silver', 'gray', 'black'], edgecolor='black')\n\n#print(rects[0])\n#ax.set_xticklabels(values)\nplt.title(sys.argv[2])\nplt.ylabel(sys.argv[3])\nplt.xlabel('Configuracao da Execucao [N threads, Tam. Vetor, N Repeticoes]')\n\ntam = 0.15 * (max(values) - min(values))\nplt.ylim(min(values) - tam, max(values) + tam)\nplt.xticks(rotation='vertical')\n\nfor i in range(0, len(values)):\n\t(X, Y) = rects[i].xy\n\tplt.text(x=X+0.04, y=values[i] + 0.02, s=values_round[i], size = 7)\n#\tprint('x = ' + str(X) + ', y = ' + str(Y))\n\n#barlist = plt.bar()\n#barlist[0].set_color('r')\n\n#plt.show()\nplt.savefig('graph.png')\n","sub_path":"trabalhos/t5/parte1/generate_graph.py","file_name":"generate_graph.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"482510982","text":"import wave,random,struct\nnoise_output = wave.open('noise2.wav', 'w')\nnoise_output.setparams((2, 2, 44100, 0, 'NONE', 'not compressed'))\n\nSAMPLE_LEN=13230000\nTONE_COUNT=5\nTONE_LENGTH=88200\nTIMEOUT=44100\npos=0\nblank_value=struct.pack(\"h\",0)\na=0\nvalues=['\\0\\0' for i in range(SAMPLE_LEN*2)]\nvalue =10000\npacked_value = struct.pack('h', value)\nfor i in range(0, SAMPLE_LEN):\n\n if i%44100==0:\n value =random.randint(5000,20000)\n packed_value = struct.pack('h', value)\n values[pos]=packed_value\n values[pos+1]=packed_value\n pos+=2\n a+=1\nvalue_str = ''.join(values)\nnoise_output.writeframes(value_str)\nnoise_output.close()\n","sub_path":"backups/audio/mkaudio.py","file_name":"mkaudio.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"23431820","text":"import os\nimport subprocess\nfrom typing import List, Tuple\nimport itertools\nimport time\nimport math\n\ndef getDateParts(total_milliseconds: int) -> str:\n yield \"{} milliseconds\".format(total_milliseconds % 1000) if total_milliseconds % 1000 else \"\" # milliseconds\n total_milliseconds //= 1000\n yield \"{} seconds\".format(total_milliseconds % 60) if total_milliseconds % 1000 else \"\" # seconds\n total_milliseconds //= 60\n yield \"{} minutes\".format(total_milliseconds % 60) if total_milliseconds % 60 else \"\" # minutes\n total_milliseconds //= 60\n yield \"{} hours\".format(total_milliseconds % 60) if total_milliseconds % 60 else \"\" # hours\n total_milliseconds //= 60\n return \"{} days\".format(total_milliseconds) if total_milliseconds else \"\" # days\n\ndef getTimeStr(elapsedTime: int) -> str:\n elapsedTime = math.floor(elapsedTime * 10**3) # convert seconds to milliseconds\n return \", \".join([t for t in getDateParts(math.floor(elapsedTime)) if t][::-1]) \n\nclass TestResult:\n def __init__(self, diff: List[Tuple[str, str]]):\n self._msg = \"\"\n for out, expected in diff:\n if out and expected:\n self._msg += \"expected {} but got {}\\n\".format(expected, out)\n elif not out and expected:\n self._msg += \"Missing expected line: {}\\n\".format(expected)\n elif out and not expected:\n self._msg += \"got unexpected line: {}\\n\".format(out)\n self._msg = self._msg[:-1]\n\n @staticmethod\n def create(msg):\n res = TestResult([])\n res._msg = msg\n return res\n\n @property\n def success(self) -> bool:\n return not bool(self._msg)\n\n @property\n def msg(self) -> str:\n return self._msg\n\n\nclass Test:\n def __init__(self, interpeter: str, test_file: str, output_file: str, input_file: str):\n if not os.path.exists(test_file):\n raise RuntimeError(\n \"Test file {} does not exists\".format(test_file))\n\n if not os.path.exists(output_file):\n raise RuntimeError(\n \"output file {} does not 
exists\".format(output_file))\n\n self._cmd = '\"{}\" \"{}\"'.format(interpeter, test_file)\n self._expected_output = output_file\n if os.path.exists(input_file):\n self._cmd += ' < \"{}\"'.format(input_file)\n\n def test(self) -> Tuple[TestResult, str]:\n diff = []\n \n start = time.time()\n res = subprocess.run(self._cmd, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, text=True)\n with open(self._expected_output) as file:\n for out, expected in itertools.zip_longest(res.stdout.splitlines(), file, fillvalue=str()):\n expected = expected.replace(\"\\n\", \"\")\n if out != expected:\n diff.append((out, expected))\n\n return TestResult(diff), getTimeStr(time.time() - start)\n","sub_path":"Tester/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"213421096","text":"\"\"\"\nGiven an array of sorted numbers, remove all duplicates from it. You should not use any extra space; after removing the duplicates in-place return the length of the subarray that has no duplicate in it.\n\nExample 1:\n\nInput: [2, 3, 3, 3, 6, 9, 9]\nOutput: 4\nExplanation: The first four elements after removing the duplicates will be [2, 3, 6, 9].\nExample 2:\n\nInput: [2, 2, 2, 11]\nOutput: 2\nExplanation: The first two elements after removing the duplicates will be [2, 11].\n\n\n\"\"\"\n\n\ndef remove_duplicates(arr):\n non_dup = 1\n n = len(arr)\n for i in range(n):\n if arr[non_dup-1] != arr[i]:\n arr[i], arr[non_dup] = arr[non_dup], arr[i]\n non_dup += 1\n\n return non_dup\n\nprint(remove_duplicates([2, 3, 3, 3, 6, 9, 9]))\nprint(remove_duplicates([2, 2, 2, 11]))\n","sub_path":"educative.io/coding_patterns/two_pointers/remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"201456072","text":"import sys\nimport time\nimport logging\n\nfrom fwk.LED import LED\nfrom arg.BaseArg import BaseArg\n\nclass LEDArg(BaseArg):\n \"\"\"LEDArg arg for manager arguments\"\"\"\n\n def __init__(self):\n self.pwm = LED()\n super(LEDArg, self).__init__()\n\n def load_arguments(self, argv):\n for i in range(len(argv))[1:]:\n\n arg, val = self.get_argument(argv, i)\n\n if arg == \"gpio\" or arg == \"pin\" or arg == \"p\":\n self.pwm.gpio = int(val)\n\n if arg == \"value\" or arg == \"val\" or arg == \"v\":\n self.pwm.value = int(val)\n\n def do(self):\n self.validate()\n\n try:\n self.pwm.execute()\n finally:\n self.pwm.cleanup()\n","sub_path":"rpy/arg/LEDArg.py","file_name":"LEDArg.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"281430759","text":"import copy\nimport numpy\nimport _pickle as cPickle\n\nimport utils\nfrom data import read_all\nfrom covington_transistion import Configuration\n\n\"\"\"\nThe oracles that are used to decide what step to take next in the transition-based parsing systems. The knowing oracle\nis used for training, the others for prediction.\n\"\"\"\n\n\nclass Oracle(object):\n def next_step(self, configuration):\n \"\"\"\n :param configuration: Configuration\n :return: String describing the action that needs to be taken next\n \"\"\"\n return \"\"\n\n\nclass KnowingOracle(Oracle):\n \"\"\"\n Knows the outcome and how to get there.\n \"\"\"\n def __init__(self, arcs):\n # The golden arc-collection it is working towards\n self.arcs = arcs\n\n def next_step(self, configuration):\n # The next step is deterministically decided by the resulting arc-collection self.arcs\n if configuration.empty_stack():\n return \"shift\"\n\n buffer = configuration.get_buffer_head()\n stack = configuration.get_stack_head()\n for arc in self.arcs:\n if str(arc.source) == buffer and str(arc.target) == stack:\n return \"left_arc\"\n if str(arc.source) == stack and str(arc.target) == buffer:\n return \"right_arc\"\n\n next = [str(x.source) for x in self.arcs if str(x.target) == buffer]\n\n # If entity on buffer has no parent, ROOT is parent\n if str(stack) == \"ROOT\" and not next:\n return \"right_arc\"\n next.extend([str(x.target) for x in self.arcs if str(x.source) == buffer])\n for n in next:\n if configuration.on_stack(n):\n return \"no_arc\"\n return \"shift\"\n\n\nclass NNOracle(Oracle):\n # Regular old greedy parser\n def __init__(self, network):\n # The network that decides what steps to take\n self.network = network\n\n def next_step(self, configuration):\n # The next step is the best decision according to self.network if it is possible to do that action, otherwise\n # it is the next best one.\n distribution = self.network.predict(configuration)\n actions = 
utils.get_actions()\n distribution = distribution.tolist()[0]\n en = list(enumerate(distribution))\n en.sort(key=lambda tup: tup[1])\n for (ind, val) in en[::-1]:\n action = list(actions.keys())[list(actions.values()).index(ind)]\n if configuration.action_possible(action):\n return action\n print(\"This should not print\")\n return None\n\n\nclass RandomOracle(Oracle):\n # An oracle that takes random decisions based on the distribution of actions found in the dataset\n def __init__(self):\n pass\n\n def next_step(self, configuration):\n indices = range(4)\n # TODO: not hardcoded\n distribution = [0.04, 0.15, 0.13, 0.68]\n actions = utils.get_actions()\n for i in range(4):\n ind = numpy.random.choice(indices, 1, distribution)[0]\n print(ind)\n action = list(actions.keys())[list(actions.values()).index(ind)]\n if configuration.action_possible(action):\n print(action)\n return action\n else:\n x = indices.index(ind)\n del indices[x]\n del distribution[x]\n return None\n\n\ndef get_training_sequence(entities, arcs, doc):\n # Given entities and arcs, yield the sequence of configuration and actions needed to get from the intitial\n # configuration to the terminal one\n # Is used to determine the training sequence of a document\n configuration = Configuration(entities, doc)\n oracle = KnowingOracle(arcs)\n\n while not configuration.empty_buffer():\n function_string = oracle.next_step(configuration)\n conf_copy = cPickle.loads(cPickle.dumps(configuration, -1))\n yield (conf_copy, function_string)\n # applies function to configuration\n getattr(configuration, function_string)()\n\n\nif __name__ == '__main__':\n # Test methods\n documents = read_all(utils.dev, transitive=False)\n for doc in documents:\n sequence = get_training_sequence(doc.get_entities(), doc.get_relations(), doc)\n # Should print equal amounts\n print(len(doc.get_relations()), len([x for x in sequence if x[1] in [\"left_arc\", 
\"right_arc\"]]))\n","sub_path":"code/oracle.py","file_name":"oracle.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"217369443","text":"#!/usr/bin/env python\n# coding: utf-8\nimport unittest\nimport sys\nfrom optparse import OptionParser\nimport logging\nfrom copy import copy\n\nfrom test.util import prepare_test_environment, clear_test_environment, GLOBAL\nfrom test.server import start_server, stop_server\nfrom grab.tools.watch import watch\n\n# **********\n# Grab Tests\n# * pycurl transport\n# * extensions\n# * tools\n# **********\nGRAB_TEST_LIST = (\n # Internal API\n 'test.case.grab_api',\n 'test.case.grab_transport',\n 'test.case.response_class',\n 'test.case.grab_debug',\n # Response processing\n 'test.case.grab_xml_processing',\n 'test.case.grab_response_body_processing',\n #'test.case.grab_charset',\n # Network\n 'test.case.grab_get_request',\n 'test.case.grab_request',\n 'test.case.grab_post_request',\n 'test.case.grab_user_agent',\n 'test.case.grab_cookies',\n # Refactor\n 'test.case.grab_proxy',\n 'test.case.grab_upload_file',\n 'test.case.grab_limit_option',\n 'test.case.grab_charset_issue',\n 'test.case.grab_pickle',\n # *** Extension sub-system\n 'test.case.extension',\n # *** Extensions\n 'test.case.ext_text',\n 'test.case.ext_rex',\n 'test.case.ext_lxml',\n #'test.case.ext_form',\n 'test.case.ext_doc',\n 'test.case.ext_structured',\n # *** Tornado Test Server\n 'test.case.debug_server',\n # *** grab.tools\n 'test.case.tools_text',\n 'test.case.tools_html',\n 'test.case.tools_lxml',\n 'test.case.tools_account',\n 'test.case.tools_control',\n 'test.case.tools_content',\n 'test.case.tools_http',\n # *** Item\n 'test.case.item',\n # *** Selector\n 'test.case.selector',\n # *** Mock transport\n 'test.case.grab_transport_mock',\n # Javascript features\n 'test.case.grab_js',\n # pycurl tests\n 'test.case.pycurl_cookie',\n 'test.case.util_module',\n 'test.case.export_mysql_dumper',\n)\n\nGRAB_EXTRA_TEST_LIST = (\n 'test.case.tools_russian',\n 'test.case.grab_django',\n 'test.case.ext_pyquery',\n)\n\n# *******************************************\n# Kit 
Tests\n# * All Grab tests with enabled Kit Transport\n# * Kit Selectors\n# *******************************************\n\nKIT_TEST_LIST = list(GRAB_TEST_LIST)\nKIT_TEST_LIST += [\n 'test.case.selector_kit',\n]\nfor name in (\n 'test.case.grab_proxy',\n 'test.case.grab_upload_file',\n 'test.case.grab_limit_option',\n):\n KIT_TEST_LIST.remove(name)\n\nKIT_EXTRA_TEST_LIST = list(GRAB_EXTRA_TEST_LIST)\nKIT_EXTRA_TEST_LIST += [\n 'test.case.kit_live_sites',\n]\n\n# ************\n# Spider Tests\n# ************\n\nSPIDER_TEST_LIST = (\n 'test.case.spider',\n #'tests.test_distributed_spider',\n 'test.case.spider_task',\n 'test.case.spider_proxy',\n 'test.case.spider_queue',\n 'test.case.spider_misc',\n 'test.case.spider_meta',\n 'test.case.spider_error',\n 'test.case.spider_cache',\n 'test.case.spider_command_controller',\n)\n\nSPIDER_EXTRA_TEST_LIST = ()\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n parser = OptionParser()\n parser.add_option('-t', '--test', help='Run only specified tests')\n parser.add_option('--transport', help='Test specified transport',\n default='grab.transport.curl.CurlTransport')\n parser.add_option('--extra', action='store_true',\n default=False, help='Run extra tests for specific backends')\n parser.add_option('--test-grab', action='store_true',\n default=False, help='Run tests for Grab::Spider')\n parser.add_option('--test-spider', action='store_true',\n default=False, help='Run tests for Grab')\n parser.add_option('--test-all', action='store_true',\n default=False, help='Run tests for both Grab and Grab::Spider')\n parser.add_option('--test-kit', action='store_true',\n default=False, help='Run tests for Grab with WebKit transport')\n parser.add_option('--backend-mongo', action='store_true',\n default=False, help='Run extra tests that depends on mongodb')\n parser.add_option('--backend-redis', action='store_true',\n default=False, help='Run extra tests that depends on redis')\n parser.add_option('--backend-mysql', 
action='store_true',\n default=False, help='Run extra tests that depends on mysql')\n parser.add_option('--backend-postgresql', action='store_true',\n default=False, help='Run extra tests that depends on postgresql')\n opts, args = parser.parse_args()\n\n GLOBAL['transport'] = opts.transport\n\n # Override CLI option in case of kit test\n if opts.test_kit:\n GLOBAL['transport'] = 'grab.transport.kit.KitTransport'\n\n if opts.backend_mongo:\n GLOBAL['backends'].append('mongo')\n\n if opts.backend_redis:\n GLOBAL['backends'].append('redis')\n\n if opts.backend_mysql:\n GLOBAL['backends'].append('mysql')\n\n if opts.backend_postgresql:\n GLOBAL['backends'].append('postgresql')\n\n prepare_test_environment()\n test_list = []\n\n if opts.test_all:\n test_list += GRAB_TEST_LIST\n test_list += SPIDER_TEST_LIST\n if opts.extra:\n test_list += GRAB_EXTRA_TEST_LIST\n test_list += SPIDER_EXTRA_TEST_LIST\n\n if opts.test_grab:\n test_list += GRAB_TEST_LIST\n if opts.extra:\n test_list += GRAB_EXTRA_TEST_LIST\n\n if opts.test_kit:\n test_list += KIT_TEST_LIST\n if opts.extra:\n test_list += KIT_EXTRA_TEST_LIST\n\n if opts.test_spider:\n test_list += SPIDER_TEST_LIST\n if opts.extra:\n test_list += SPIDER_EXTRA_TEST_LIST\n\n if opts.test:\n test_list += [opts.test]\n\n # Check tests integrity\n # Ensure that all test modules are imported correctly\n for path in test_list:\n __import__(path, None, None, ['foo'])\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for path in test_list:\n mod_suite = loader.loadTestsFromName(path)\n for some_suite in mod_suite:\n for test in some_suite:\n if not hasattr(test, '_backend') or test._backend in GLOBAL['backends']:\n suite.addTest(test)\n\n runner = unittest.TextTestRunner()\n\n start_server()\n result = runner.run(suite)\n\n clear_test_environment()\n if result.wasSuccessful():\n sys.exit(0)\n else:\n sys.exit(1)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"runtest.py","file_name":"runtest.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"174568774","text":"#!/usr/bin/env python3\n\"\"\" doc \"\"\"\n\nimport tensorflow.keras as K\n\n\ndef inception_block(A_prev, filters):\n \"\"\" doc \"\"\"\n\n initialize = K.initializers.he_normal(seed=None)\n\n layer1 = K.layers.Conv2D(filters=filters[0], kernel_size=1,\n padding='same', activation='relu',\n kernel_initializer=initialize)(A_prev)\n\n layer2r = K.layers.Conv2D(filters=filters[1], kernel_size=1,\n padding='same', activation='relu',\n kernel_initializer=initialize)(A_prev)\n\n layer2 = K.layers.Conv2D(filters=filters[2], kernel_size=3,\n padding='same', activation='relu',\n kernel_initializer=initialize)(layer2r)\n\n layer3r = K.layers.Conv2D(filters=filters[3], kernel_size=1,\n padding='same', activation='relu',\n kernel_initializer=initialize)(A_prev)\n\n layer3 = K.layers.Conv2D(filters=filters[4], kernel_size=5,\n padding='same', activation='relu',\n kernel_initializer=initialize)(layer3r)\n\n poolLayer = K.layers.MaxPooling2D(pool_size=[3, 3], strides=1,\n padding='same')(A_prev)\n\n poolLayerR = K.layers.Conv2D(filters=filters[5], kernel_size=1,\n padding='same', activation='relu',\n kernel_initializer=initialize)(poolLayer)\n\n layer_list = [layer1, layer2, layer3, poolLayerR]\n\n return (K.layers.concatenate(layer_list))\n","sub_path":"supervised_learning/0x08-deep_cnns/0-inception_block.py","file_name":"0-inception_block.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"180304909","text":"# Import the required modules\nimport cv2\nimport time\nimport PIL.Image\nfrom io import BytesIO\nimport numpy as np\nimport glob\nimport argparse\nfrom math import pow,sqrt\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nfrom imutils.video import FPS\nfrom threading import Thread\nfrom imutils.video import FileVideoStream\nimport imutils\nimport acapture\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", type=str, help=\"path to our input image\")\nap.add_argument(\"-c\", \"--confidence\", type=float, help=\"confidence threshold\")\nargs = vars(ap.parse_args())\n\nlabels = [line.strip() for line in open(r'C:\\Users\\sande\\Downloads\\mask-detector-master_68\\mask-detector-master\\class_labels.txt')]\n\n# Generate random bounding box bounding_box_color for each label\nbounding_box_color = np.random.uniform(0, 255, size=(len(labels), 3))\nnetwork = cv2.dnn.readNetFromCaffe(r'C:\\Users\\sande\\Downloads\\mask-detector-master_68\\mask-detector-master\\SSD_MobileNet_prototxt.txt', r'C:\\Users\\sande\\Downloads\\mask-detector-master_68\\mask-detector-master\\SSD_MobileNet.caffemodel')\n\n# ----\n\n# ### Detect faces on image using OpenCV\n# Face detection with OpenCV and deep learning (Adrian)\n# https://www.pyimagesearch.com/2018/02/26/face-detection-with-opencv-and-deep-learning/\n\n# load our serialized model from disk\ncaffe_model = 'deploy.prototxt.txt'\ncaffe_trained = 'res10_300x300_ssd_iter_140000.caffemodel'\ncaffe_confidence = 0.70\nmodel_folder = r'./'\nmask_model = \"mask_mobile_net.h5\"\n\nif args[\"confidence\"]:\n caffe_confidence = args[\"confidence\"]\n\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(model_folder + caffe_model, \n model_folder + caffe_trained\n )\n\n\nmodel = load_model(model_folder + mask_model)\n\n\n# Detect faces 
on image and call mask predictor\ndef detect_face_cnn(image, save = False, show = False):\n \n if image is not None:\n (h, w) = image.shape[:2]\n \n image_resized = cv2.resize(image, (300, 300))\n\n blob = cv2.dnn.blobFromImage(image_resized, \n 0.007843, (300, 300), 127.5)\n\n\n network.setInput(blob)\n detections = network.forward()\n\n pos_dict = dict()\n coordinates = dict()\n F = 615\n\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with the prediction\n confidence = detections[0, 0, i, 2]\n \n # filter out weak detections by ensuring the `confidence` is\n # greater than the minimum confidence\n if confidence > caffe_confidence:\n # compute the (x, y)-coordinates of the bounding box for the\n # object\n class_id = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n if class_id == 15.00:\n\n # Draw bounding box for the object\n cv2.rectangle(image, (startX, startY), (endX, endY), bounding_box_color[class_id], 2)\n\n label = \"{}: {:.2f}%\".format(labels[class_id], confidence * 100)\n #print(\"{}\".format(label))\n\n\n coordinates[i] = (startX, startY, endX, endY)\n\n # Mid point of bounding box\n x_mid = round((startX+endX)/2,4)\n y_mid = round((startY+endY)/2,4)\n\n height = round(endY-startY,4)\n\n # Distance from camera based on triangle similarity\n distance = (165 * F)/height\n #print(\"Distance(cm):{dist}\\n\".format(dist=distance))\n\n # Mid-point of bounding boxes (in cm) based on triangle similarity technique\n x_mid_cm = (x_mid * distance) / F\n y_mid_cm = (y_mid * distance) / F\n pos_dict[i] = (x_mid_cm,y_mid_cm,distance)\n \n\n # Distance between every object detected in a frame\n close_objects = set()\n for i in pos_dict.keys():\n for j in pos_dict.keys():\n if i < j:\n dist = sqrt(pow(pos_dict[i][0]-pos_dict[j][0],2) + pow(pos_dict[i][1]-pos_dict[j][1],2) + pow(pos_dict[i][2]-pos_dict[j][2],2))\n\n # Check 
if distance less than 2 metres or 200 centimetres\n if dist < 200:\n close_objects.add(i)\n close_objects.add(j)\n\n for i in pos_dict.keys():\n if i in close_objects:\n COLOR = (0,0,255)\n else:\n COLOR = (0,255,0)\n (startX, startY, endX, endY) = coordinates[i]\n\n cv2.rectangle(image, (startX, startY), (endX, endY), COLOR, 2)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n # Convert cms to feet\n cv2.putText(image, 'Depth: {i} ft'.format(i=round(pos_dict[i][2]/30.48,4)), (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR, 2)\n\n if image is not None:\n (h, w) = image.shape[:2]\n \n image_resized = cv2.resize(image, (300, 300))\n\n blob = cv2.dnn.blobFromImage(image_resized, \n 1.0,\n (300, 300), \n (104.0, \n 177.0, \n 123.0))\n net.setInput(blob)\n detections = net.forward()\n\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with the prediction\n confidence = detections[0, 0, i, 2]\n \n # filter out weak detections by ensuring the `confidence` is\n # greater than the minimum confidence\n if confidence > caffe_confidence:\n # compute the (x, y)-coordinates of the bounding box for the\n # object\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n try:\n img_crop = image[startY-10:endY+10, startX-10:endX+10]\n\n # predict mask or not\n pred, pred_res = predict_mask(img_crop)\n \n #print(\"Face Detection confidence:{:2f}\".format(round(confidence,2)), pred)\n\n label = \"MASK\" if pred_res == 0 else \"NO-MASK\"\n color = (0,255,0) if pred_res == 0 else (0,0,255)\n\n # cv2.putText(image, label, (startX, startY), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3)\n # cv2.rectangle(image, (startX, startY), (endX, endY), color)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n cv2.rectangle(image, (startX, startY), (endX, endY), color,2)\n cv2.putText(image, label, (startX, y),cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n except:\n print(\"found crop 
errors {}\".format(round(confidence,2)))\n\n \n if show:\n cv2.imshow(\"Image\", image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n \n return image\n else:\n print(\"image not found!\")\n\n\n# Predict if face is using mask or not\ndef predict_mask(image):\n image = cv2.resize(image, (224, 224))\n image = image.astype(\"float\") / 255.0\n image = img_to_array(image)\n image = np.expand_dims(image, axis=0)\n \n # make predictions on the input image\n pred = model.predict(image)\n pred_res = pred.argmax(axis=1)[0]\n \n return pred, pred_res\ndef open_cam_rtsp(uri, rtsp_latency, image_width, image_height):\n\tgst_str =(\"rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! videoconvert ! appsink sync=false\").format(uri, rtsp_latency, image_width, image_height)\n\n\treturn cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)\n\ndef show_webcam():\n\t# fvs = FileVideoStream('rtsp://192.168.1.10:554').start()\n\t# time.sleep(1.0)\n\tfps = FPS().start()\n\t#cam = cv2.VideoCapture('rtsp://192.168.1.10:554')\n\tcam=cv2.VideoCapture(('gst-launch-1.0 \\\n ! rtspsrc location=rtsp://admin:1234567@192.168.1.10:554 latency=300 \\\n ! rtph264depay ! avdec_h264 ! video/x-raw ! videoconvert \\\n ! v4l2sink device=/dev/video0'),cv2.CAP_GSTREAMER)\n\tprint(cam)\n\t# cam.set(cv2.CAP_PROP_BUFFERSIZE, 200)\n\t# cam.set(3,640)\n\t# cam.set(4,480)\n\t# time.sleep(2)\n\t# cam.set(5,50)\n\t# cam.set(15, -8)\n\t#cam=cv2.VideoCapture((\"rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! videoconvert ! 
appsink sync=false\").format('rtsp://192.168.1.10:554',100,640, 480),cv2.CAP_GSTREAMER)\n\t#print(cam)\n\t#cam.set(cv2.CAP_PROP_BUFFERSIZE,1)\n\t#cam = cv2.VideoCapture(0,cv2.CAP_DSHOW)\n\t#count=2\n\t#i=np.zeros((1920,1080,3))\n\twhile cam.isOpened():\n\t#while fvs.more():\n\t\ttry:\n\t\t\t\tframe=cam.read()\n\t\t\t\t#t1 = time.time()\n\t\t\t\t#cam=cam.get(cv2.CAP_PROP_BUFFERSIZE,3)\n\t\t\t\t#frame=fvs.read()\n\t\t\t\t# cam.set(3,640)\n\t\t\t\t# cam.set(4,480)\n\t\t\t\t# cam.set(cv2.CAP_PROP_FPS,5)\n\n\n\t\t\t\t# ret, frame = cam.read()\n\t\t\t\t# #print(frame.shape)\n\t\t\t\t# i=np.append(i,frame).reshape(count,1920,1080,3)\n\t\t\t\t# print(len(i))\n\t\t\t\t# if len(i)>=100:\n\t\t\t\t# \tfor j in i:\n\t\t\t\t# ret,frame = cam.read()\n\t\t\t\t# i.append(frame)\n\t\t\t\t#t1=time.time()\n\t\t\t\t# height , width , layers = frame.shape\n\t\t\t\t# new_h=int(height/2)\n\t\t\t\t# new_w=int(width/2)\n\t\t\t\t# frame = cv2.resize(frame, (new_w, new_h))\n\t\t\t\t#frame=cv2.resize(frame,(0,0),fx=0.25,fy=0.25)\n\t\t\t\tframe = imutils.resize(frame, width=450)\n\t\t\t\tframe = detect_face_cnn(frame)\n\t\t\t\t# cv2.namedWindow('Image', cv2.WINDOW_NORMAL)\n\t\t\t\t# cv2.setWindowProperty(\"Image\",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\n\t\t\t\tcv2.imshow(\"Image\", frame)\n\t\t\t\t#print('the time is:',time.time()-t1)\n\t\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\t\tbreak\n\t\t\t\tfps.update()\n\t\t\t\t# count+=1\n\t\texcept KeyboardInterrupt:\n\t\t\t\tprint()\n\t\t\t\tcam.release()\n\t\t\t\t#fvs.stop()\n\t\t\t\tprint (\"Stream stopped\")\n\t\t\t\tbreak\n\n\n\n\tfps.stop()\n\t#print('frame:',i)\n\t# print(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\n\t# print(\"[INFO] approx. 
FPS: {:.2f}\".format(fps.fps()))\n\tcam.release()\n\tcv2.destroyAllWindows()\n\t#fvs.stop()\n\n\n### MAIN AREA\n\n# ### Check image source from file or Webcam\n\n# select image or webcam\nif args[\"image\"] is not None:\n image = cv2.imread(args[\"image\"])\n detect_face_cnn(image, show = True)\nelse:\n show_webcam()\n\n\n\n\n","sub_path":"mask_distance_v2.py","file_name":"mask_distance_v2.py","file_ext":"py","file_size_in_byte":11044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"98531449","text":"import lra\n\nif __name__ == \"__main__\":\n runs_no = 1000\n n = 100\n\n prob_sum = 0\n for run in xrange(runs_no):\n alg = lra.LinearRegressionAlgorithm(n)\n temp_prob = alg.get_prob(True)\n print('prob for run ', run, ' = ', temp_prob)\n prob_sum += temp_prob\n\n print('average: ', 1 - prob_sum/float(runs_no))\n","sub_path":"week2/hw2_06.py","file_name":"hw2_06.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"428772045","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2021-2023 by SCICO Developers\n# All rights reserved. BSD 3-clause License.\n# This file is part of the SCICO package. Details of the copyright and\n# user license can be found in the 'LICENSE' file distributed with the\n# package.\n\n\"\"\"Utility functions used by example scripts.\"\"\"\n\n\nimport glob\nimport os\nimport tempfile\nimport zipfile\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\n\nimport imageio.v2 as iio\n\nimport scico.numpy as snp\nfrom scico import random, util\nfrom scico.typing import Shape\nfrom scipy.io import loadmat\nfrom scipy.ndimage import zoom\n\n\ndef rgb2gray(rgb: snp.Array) -> snp.Array:\n \"\"\"Convert an RGB image (or images) to grayscale.\n\n Args:\n rgb: RGB image as Nr x Nc x 3 or Nr x Nc x 3 x K array.\n\n Returns:\n Grayscale image as Nr x Nc or Nr x Nc x K array.\n \"\"\"\n\n w = snp.array([0.299, 0.587, 0.114], dtype=rgb.dtype)[np.newaxis, np.newaxis]\n return snp.sum(w * rgb, axis=2)\n\n\ndef volume_read(path: str, ext: str = \"tif\") -> np.ndarray:\n \"\"\"Read a 3D volume from a set of files in the specified directory.\n\n All files with extension `ext` (i.e. matching glob `*.ext`)\n in directory `path` are assumed to be image files and are read.\n The filenames are assumed to be such that their alphanumeric\n ordering corresponds to their order as volume slices.\n\n Args:\n path: Path to directory containing the image files.\n ext: Filename extension.\n\n Returns:\n Volume as a 3D array.\n \"\"\"\n\n slices = []\n for file in sorted(glob.glob(os.path.join(path, \"*.\" + ext))):\n image = iio.imread(file)\n slices.append(image)\n return np.dstack(slices)\n\n\ndef get_epfl_deconv_data(channel: int, path: str, verbose: bool = False): # pragma: no cover\n \"\"\"Download example data from EPFL Biomedical Imaging Group.\n\n Download deconvolution problem data from EPFL Biomedical Imaging\n Group. 
The downloaded data is converted to `.npz` format for\n convenient access via :func:`numpy.load`. The converted data is saved\n in a file `epfl_big_deconv_.npz` in the directory specified\n by `path`.\n\n Args:\n channel: Channel number between 0 and 2.\n path: Directory in which converted data is saved.\n verbose: Flag indicating whether to print status messages.\n \"\"\"\n\n # data source URL and filenames\n data_base_url = \"http://bigwww.epfl.ch/deconvolution/bio/\"\n data_zip_files = [\"CElegans-CY3.zip\", \"CElegans-DAPI.zip\", \"CElegans-FITC.zip\"]\n psf_zip_files = [\"PSF-\" + data for data in data_zip_files]\n\n # ensure path directory exists\n if not os.path.isdir(path):\n raise ValueError(f\"Path {path} does not exist or is not a directory.\")\n\n # create temporary directory\n temp_dir = tempfile.TemporaryDirectory()\n # download data and psf files for selected channel into temporary directory\n for zip_file in (data_zip_files[channel], psf_zip_files[channel]):\n if verbose:\n print(f\"Downloading {zip_file} from {data_base_url}\")\n data = util.url_get(data_base_url + zip_file)\n f = open(os.path.join(temp_dir.name, zip_file), \"wb\")\n f.write(data.read())\n f.close()\n if verbose:\n print(\"Download complete\")\n\n # unzip downloaded data into temporary directory\n for zip_file in (data_zip_files[channel], psf_zip_files[channel]):\n if verbose:\n print(f\"Extracting content from zip file {zip_file}\")\n with zipfile.ZipFile(os.path.join(temp_dir.name, zip_file), \"r\") as zip_ref:\n zip_ref.extractall(temp_dir.name)\n\n # read unzipped data files into 3D arrays and save as .npz\n zip_file = data_zip_files[channel]\n y = volume_read(os.path.join(temp_dir.name, zip_file[:-4]))\n zip_file = psf_zip_files[channel]\n psf = volume_read(os.path.join(temp_dir.name, zip_file[:-4]))\n\n npz_file = os.path.join(path, f\"epfl_big_deconv_{channel}.npz\")\n if verbose:\n print(f\"Saving as {npz_file}\")\n np.savez(npz_file, y=y, psf=psf)\n\n\ndef 
epfl_deconv_data(\n channel: int, verbose: bool = False, cache_path: Optional[str] = None\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Get deconvolution problem data from EPFL Biomedical Imaging Group.\n\n If the data has previously been downloaded, it will be retrieved from\n a local cache.\n\n Args:\n channel: Channel number between 0 and 2.\n verbose: Flag indicating whether to print status messages.\n cache_path: Directory in which downloaded data is cached. The\n default is `~/.cache/scico/examples`, where `~` represents\n the user home directory.\n\n Returns:\n tuple: A tuple (y, psf) containing:\n\n - **y** : (np.ndarray): Blurred channel data.\n - **psf** : (np.ndarray): Channel psf.\n \"\"\"\n\n # set default cache path if not specified\n if cache_path is None: # pragma: no cover\n cache_path = os.path.join(os.path.expanduser(\"~\"), \".cache\", \"scico\", \"examples\")\n\n # create cache directory and download data if not already present\n npz_file = os.path.join(cache_path, f\"epfl_big_deconv_{channel}.npz\")\n if not os.path.isfile(npz_file): # pragma: no cover\n if not os.path.isdir(cache_path):\n os.makedirs(cache_path)\n get_epfl_deconv_data(channel, path=cache_path, verbose=verbose)\n\n # load data and return y and psf arrays converted to float32\n npz = np.load(npz_file)\n y = npz[\"y\"].astype(np.float32)\n psf = npz[\"psf\"].astype(np.float32)\n return y, psf\n\n\ndef get_ucb_diffusercam_data(path: str, verbose: bool = False): # pragma: no cover\n \"\"\"Download example data from UC Berkeley Waller Lab diffusercam project.\n\n Download deconvolution problem data from UC Berkeley Waller Lab\n diffusercam project. The downloaded data is converted to `.npz`\n format for convenient access via :func:`numpy.load`. 
The\n converted data is saved in a file `ucb_diffcam_data.npz.npz` in\n the directory specified by `path`.\n Args:\n path: Directory in which converted data is saved.\n verbose: Flag indicating whether to print status messages.\n \"\"\"\n\n # data source URL and filenames\n data_base_url = \"https://github.com/Waller-Lab/DiffuserCam/blob/master/example_data/\"\n data_files = [\"example_psfs.mat\", \"example_raw.png\"]\n\n # ensure path directory exists\n if not os.path.isdir(path):\n raise ValueError(f\"Path {path} does not exist or is not a directory.\")\n\n # create temporary directory\n temp_dir = tempfile.TemporaryDirectory()\n # download data files into temporary directory\n for data_file in data_files:\n if verbose:\n print(f\"Downloading {data_file} from {data_base_url}\")\n data = util.url_get(data_base_url + data_file + \"?raw=true\")\n f = open(os.path.join(temp_dir.name, data_file), \"wb\")\n f.write(data.read())\n f.close()\n if verbose:\n print(\"Download complete\")\n\n # load data, normalize it, and save as npz\n y = iio.imread(os.path.join(temp_dir.name, \"example_raw.png\"))\n y = y.astype(np.float32)\n y -= 100.0\n y /= y.max()\n mat = loadmat(os.path.join(temp_dir.name, \"example_psfs.mat\"))\n psf = mat[\"psf\"].astype(np.float64)\n psf -= 102.0\n psf /= np.linalg.norm(psf, axis=(0, 1)).min()\n\n # save as .npz\n npz_file = os.path.join(path, \"ucb_diffcam_data.npz\")\n if verbose:\n print(f\"Saving as {npz_file}\")\n np.savez(npz_file, y=y, psf=psf)\n\n\ndef ucb_diffusercam_data(\n verbose: bool = False, cache_path: Optional[str] = None\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Get example data from UC Berkeley Waller Lab diffusercam project.\n\n If the data has previously been downloaded, it will be retrieved from\n a local cache.\n\n Args:\n verbose: Flag indicating whether to print status messages.\n cache_path: Directory in which downloaded data is cached. 
The\n default is `~/.cache/scico/examples`, where `~` represents\n the user home directory.\n\n Returns:\n tuple: A tuple (y, psf) containing:\n\n - **y** : (np.ndarray): Measured image\n - **psf** : (np.ndarray): Stack of psfs.\n \"\"\"\n\n # set default cache path if not specified\n if cache_path is None: # pragma: no cover\n cache_path = os.path.join(os.path.expanduser(\"~\"), \".cache\", \"scico\", \"examples\")\n\n # create cache directory and download data if not already present\n npz_file = os.path.join(cache_path, \"ucb_diffcam_data.npz\")\n if not os.path.isfile(npz_file): # pragma: no cover\n if not os.path.isdir(cache_path):\n os.makedirs(cache_path)\n get_ucb_diffusercam_data(path=cache_path, verbose=verbose)\n\n # load data and return y and psf arrays converted to float32\n npz = np.load(npz_file)\n y = npz[\"y\"].astype(np.float32)\n psf = npz[\"psf\"].astype(np.float64)\n return y, psf\n\n\ndef downsample_volume(vol: snp.Array, rate: int) -> snp.Array:\n \"\"\"Downsample a 3D array.\n\n Downsample a 3D array. If the volume dimensions can be divided by\n `rate`, this is achieved via averaging distinct `rate` x `rate` x\n `rate` block in `vol`. 
Otherwise it is achieved via a call to\n :func:`scipy.ndimage.zoom`.\n\n Args:\n vol: Input volume.\n rate: Downsampling rate.\n\n Returns:\n Downsampled volume.\n \"\"\"\n\n if rate == 1:\n return vol\n\n if np.all([n % rate == 0 for n in vol.shape]):\n vol = snp.mean(snp.reshape(vol, (-1, rate, vol.shape[1], vol.shape[2])), axis=1)\n vol = snp.mean(snp.reshape(vol, (vol.shape[0], -1, rate, vol.shape[2])), axis=2)\n vol = snp.mean(snp.reshape(vol, (vol.shape[0], vol.shape[1], -1, rate)), axis=3)\n else:\n vol = zoom(vol, 1.0 / rate)\n\n return vol\n\n\ndef tile_volume_slices(x: snp.Array, sep_width: int = 10) -> snp.Array:\n \"\"\"Make an image with tiled slices from an input volume.\n\n Make an image with tiled `xy`, `xz`, and `yz` slices from an input\n volume.\n\n Args:\n x: Input volume consisting of a 3D or 4D array. If the input is\n 4D, the final axis represents a channel index.\n sep_width: Number of pixels separating the slices in the output\n image.\n\n Returns:\n Image containing tiled slices.\n \"\"\"\n\n if x.ndim == 3:\n fshape: Tuple[int, ...] = (x.shape[0], sep_width)\n else:\n fshape = (x.shape[0], sep_width, 3)\n out = snp.concatenate(\n (\n x[:, :, x.shape[2] // 2],\n snp.full(fshape, snp.nan),\n x[:, x.shape[1] // 2, :],\n ),\n axis=1,\n )\n\n if x.ndim == 3:\n fshape0: Tuple[int, ...] = (sep_width, out.shape[1])\n fshape1: Tuple[int, ...] = (x.shape[2], x.shape[2] + sep_width)\n trans: Tuple[int, ...] 
= (1, 0)\n\n else:\n fshape0 = (sep_width, out.shape[1], 3)\n fshape1 = (x.shape[2], x.shape[2] + sep_width, 3)\n trans = (1, 0, 2)\n out = snp.concatenate(\n (\n out,\n snp.full(fshape0, snp.nan),\n snp.concatenate(\n (\n x[x.shape[0] // 2, :, :].transpose(trans),\n snp.full(fshape1, snp.nan),\n ),\n axis=1,\n ),\n ),\n axis=0,\n )\n\n out = snp.where(snp.isnan(out), snp.nanmax(out), out)\n\n return out\n\n\ndef create_cone(img_shape: Shape, center: Optional[List[float]] = None) -> snp.Array:\n \"\"\"Compute a 2D map of the distance from a center pixel.\n\n Args:\n img_shape: Shape of the image for which the distance map is being\n computed.\n center: Tuple of center pixel coordinates. If ``None``, this is\n set to the center of the image.\n\n Returns:\n An image containing a 2D map of the distances.\n \"\"\"\n\n if center is None:\n center = [(img_dim - 1) / 2 for img_dim in img_shape]\n\n coords = [snp.arange(0, img_dim) for img_dim in img_shape]\n coord_mesh = snp.meshgrid(*coords, sparse=True, indexing=\"ij\")\n\n dist_map = sum([(coord_mesh[i] - center[i]) ** 2 for i in range(len(coord_mesh))])\n dist_map = snp.sqrt(dist_map)\n\n return dist_map\n\n\ndef gaussian(shape: Shape, sigma: Optional[np.ndarray] = None) -> np.ndarray:\n r\"\"\"Construct a multivariate Gaussian distribution function.\n\n Construct a zero-mean multivariate Gaussian distribution function\n\n .. 
math::\n f(\\mb{x}) = (2 \\pi)^{-N/2} \\, \\det(\\Sigma)^{-1/2} \\, \\exp \\left(\n -\\frac{\\mb{x}^T \\, \\Sigma^{-1} \\, \\mb{x}}{2} \\right) \\;,\n\n where :math:`\\Sigma` is the covariance matrix of the distribution.\n\n Args:\n shape: Shape of output array.\n sigma: Covariance matrix.\n\n Returns:\n Sampled function.\n\n Raises:\n ValueError: If the array `sigma` cannot be inverted.\n \"\"\"\n\n if sigma is None:\n sigma = np.diag(np.array(shape) / 7) ** 2\n N = len(shape)\n try:\n sigmainv = np.linalg.inv(sigma)\n sigmadet = np.linalg.det(sigma)\n except np.linalg.LinAlgError as e:\n raise ValueError(f\"Invalid covariance matrix {sigma}.\") from e\n grd = np.stack(np.mgrid[[slice(-(n - 1) / 2, (n + 1) / 2) for n in shape]], axis=-1)\n sigmax = np.dot(grd, sigmainv)\n xtsigmax = np.sum(grd * np.dot(grd, sigmainv), axis=-1)\n const = ((2.0 * np.pi) ** (-N / 2.0)) * (sigmadet ** (-1.0 / 2.0))\n return const * np.exp(-xtsigmax / 2.0)\n\n\ndef create_circular_phantom(\n img_shape: Shape, radius_list: list, val_list: list, center: Optional[list] = None\n) -> snp.Array:\n \"\"\"Construct a circular phantom with given radii and intensities.\n\n Args:\n img_shape: Shape of the phantom to be created.\n radius_list: List of radii of the rings in the phantom.\n val_list: List of intensity values of the rings in the phantom.\n center: Tuple of center pixel coordinates. 
If ``None``, this is\n set to the center of the image.\n\n Returns:\n The computed circular phantom.\n \"\"\"\n\n dist_map = create_cone(img_shape, center)\n\n img = snp.zeros(img_shape)\n for r, val in zip(radius_list, val_list):\n # In numpy: img[dist_map < r] = val\n img = img.at[dist_map < r].set(val)\n\n return img\n\n\ndef create_3d_foam_phantom(\n im_shape: Shape,\n N_sphere: int,\n r_mean: float = 0.1,\n r_std: float = 0.001,\n pad: float = 0.01,\n is_random: bool = False,\n) -> snp.Array:\n \"\"\"Construct a 3D phantom with random radii and centers.\n\n Args:\n im_shape: Shape of input image.\n N_sphere: Number of spheres added.\n r_mean: Mean radius of sphere (normalized to 1 along each axis).\n Default 0.1.\n r_std: Standard deviation of radius of sphere (normalized to 1\n along each axis). Default 0.001.\n pad: Padding length (normalized to 1 along each axis). Default 0.01.\n is_random: Flag used to control randomness of phantom generation.\n If ``False``, random seed is set to 1 in order to make the\n process deterministic. 
Default ``False``.\n\n Returns:\n 3D phantom of shape `im_shape`.\n \"\"\"\n c_lo = 0.0\n c_hi = 1.0\n\n if not is_random:\n np.random.seed(1)\n\n coord_list = [snp.linspace(0, 1, N) for N in im_shape]\n x = snp.stack(snp.meshgrid(*coord_list, indexing=\"ij\"), axis=-1)\n\n centers = np.random.uniform(low=r_mean + pad, high=1 - r_mean - pad, size=(N_sphere, 3))\n radii = r_std * np.random.randn(N_sphere) + r_mean\n\n im = snp.zeros(im_shape) + c_lo\n for c, r in zip(centers, radii): # type: ignore\n dist = snp.sum((x - c) ** 2, axis=-1)\n if snp.mean(im[dist < r**2] - c_lo) < 0.01 * c_hi:\n # equivalent to im[dist < r**2] = c_hi in numpy\n im = im.at[dist < r**2].set(c_hi)\n\n return im\n\n\ndef create_conv_sparse_phantom(Nx: int, Nnz: int) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Construct a disc dictionary and sparse coefficient maps.\n\n Construct a disc dictionary and a corresponding set of sparse\n coefficient maps for testing convolutional sparse coding algorithms.\n\n Args:\n Nx: Size of coefficient maps (3 x Nx x Nx).\n Nnz: Number of non-zero coefficients across all coefficient maps.\n\n Returns:\n A tuple consisting of a stack of 2D filters and the coefficient\n map array.\n \"\"\"\n\n # constant parameters\n M = 3\n Nh = 7\n e = 1\n\n # create disc filters\n h = np.zeros((M, 2 * Nh + 1, 2 * Nh + 1))\n gr, gc = np.ogrid[-Nh : Nh + 1, -Nh : Nh + 1]\n for m in range(M):\n r = 2 * m + 3\n d = np.sqrt(gr**2 + gc**2)\n v = (np.clip(d, r - e, r + e) - (r - e)) / (2 * e)\n v = 1.0 - v\n h[m] = v\n\n # create sparse random coefficient maps\n np.random.seed(1234)\n x = np.zeros((M, Nx, Nx))\n idx0 = np.random.randint(0, M, size=(Nnz,))\n idx1 = np.random.randint(0, Nx, size=(2, Nnz))\n val = np.random.uniform(0, 5, size=(Nnz,))\n x[idx0, idx1[0], idx1[1]] = val\n\n return h, x\n\n\ndef create_tangle_phantom(nx: int, ny: int, nz: int) -> snp.Array:\n \"\"\"Construct a volume phantom.\n\n Args:\n nx: x-size of output.\n ny: y-size of output.\n nz: z-size of 
output.\n\n Returns:\n An array with shape (nz, ny, nx).\n\n \"\"\"\n xs = 1.0 * np.linspace(-1.0, 1.0, nx)\n ys = 1.0 * np.linspace(-1.0, 1.0, ny)\n zs = 1.0 * np.linspace(-1.0, 1.0, nz)\n\n # default ordering for meshgrid is `xy`, this makes inputs of length\n # M, N, P will create a mesh of N, M, P. Thus we want ys, zs and xs.\n xx, yy, zz = np.meshgrid(ys, zs, xs, copy=True)\n xx = 3.0 * xx\n yy = 3.0 * yy\n zz = 3.0 * zz\n values = (\n xx * xx * xx * xx\n - 5.0 * xx * xx\n + yy * yy * yy * yy\n - 5.0 * yy * yy\n + zz * zz * zz * zz\n - 5.0 * zz * zz\n + 11.8\n ) * 0.2 + 0.5\n return (values < 2.0).astype(float)\n\n\ndef spnoise(\n img: Union[np.ndarray, snp.Array], nfrac: float, nmin: float = 0.0, nmax: float = 1.0\n) -> Union[np.ndarray, snp.Array]:\n \"\"\"Return image with salt & pepper noise imposed on it.\n\n Args:\n img: Input image.\n nfrac: Desired fraction of pixels corrupted by noise.\n nmin: Lower value for noise (pepper). Default 0.0.\n nmax: Upper value for noise (salt). 
Default 1.0.\n\n Returns:\n Noisy image\n \"\"\"\n\n if isinstance(img, np.ndarray):\n spm = np.random.uniform(-1.0, 1.0, img.shape) # type: ignore\n imgn = img.copy()\n imgn[spm < nfrac - 1.0] = nmin\n imgn[spm > 1.0 - nfrac] = nmax\n else:\n spm, key = random.uniform(shape=img.shape, minval=-1.0, maxval=1.0, seed=0) # type: ignore\n imgn = img\n imgn = imgn.at[spm < nfrac - 1.0].set(nmin) # type: ignore\n imgn = imgn.at[spm > 1.0 - nfrac].set(nmax) # type: ignore\n return imgn\n\n\ndef phase_diff(x: snp.Array, y: snp.Array) -> snp.Array:\n \"\"\"Distance between phase angles.\n\n Compute the distance between two arrays of phase angles, with\n appropriate phase wrapping to minimize the distance.\n\n Args:\n x: Input array.\n y: Input array.\n\n Returns:\n Array of angular distances.\n \"\"\"\n\n mod = snp.mod(snp.abs(x - y), 2 * snp.pi)\n return snp.minimum(mod, 2 * snp.pi - mod)\n","sub_path":"scico/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":19310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"407991697","text":"\"\"\"Config for installing a python module/package.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nNAME = 'MTLCC'\nAUTHOR = 'Alejandro Coca-Castro based on Rußwurm & Körner (2018) Multi-Temporal Land Cover Classification with Sequential Recurrent Encoders',\nEMAIL = 'acocac@gmail.com',\nVERSION = '0.1'\nREQUIRED_PACKAGES = ['configparser','cloudml-hypertune']\nLICENSE = 'MIT'\nDESCRIPTION = 'Run MTLCC in Google AI'\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n author=AUTHOR,\n author_email=EMAIL,\n license=LICENSE,\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n zip_safe=False)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"241459735","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 23 11:03:26 2018\r\n\r\n@author: CAZ2BJ\r\n\"\"\"\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport functions_io as fio\r\nimport functions_csv as fcsv\r\nimport functions_plot as fplot\r\nimport functions_excel as fexcel\r\nimport functions_data_processing as fdp\r\n\r\ndef remove_empty_folders(path, remove_dir_level, removeRoot=True): # remove_dir_level = 0 dir, 1 - first inner dirs ...\r\n remove_dir_level = remove_dir_level + 1 \r\n\r\n if not os.path.isdir(path):\r\n return\r\n\r\n files = os.listdir(path) # search for dirs of files\r\n \r\n if len(files): # if dirs or files have been found\r\n for f in files: # for dir or file in dirs_and_files\r\n fullpath = os.path.join(path, f) # create full_path\r\n if os.path.isdir(fullpath): # if current file or dir is DIR\r\n remove_empty_folders(fullpath, remove_dir_level) # run recursively another instance of function \r\n \r\n files = os.listdir(path)\r\n if len(files) == 0 and removeRoot and remove_dir_level > 3:\r\n print (\"Removing empty folder:\", path)\r\n os.rmdir(path)\r\n\r\n else:\r\n pass\r\n \r\n \r\n\r\n\r\nfiles = [r'X:\\Dnox\\Tesla\\2014', r'X:\\Dnox\\Tesla\\2015', r'X:\\Dnox\\Tesla\\2016', r'X:\\Dnox\\Tesla\\2017']\r\n\r\nfor file in files:\r\n remove_empty_folders(file,1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"empty_remove.py","file_name":"empty_remove.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"508237477","text":"# NOTE: 'procs' is the abbreviation of 'processed'\n# NOTE: 'baln' is the abbreviation of 'balance(d)'\n\nimport numpy as np\nimport cv2\nimport pickle\nimport time\n\nfileNames = [\"190627_184747_1067\", \"190625_221209_1810\"]\nRESIZE_RATE = 0.25\n\nDAYNIGHT_CHECK_POS = (5, 5)\nUSED_KEYS = [\"up\", \"down\", \"none\"]\n\n\nkeyIndex = {key: idx for idx, key in enumerate(USED_KEYS)}\n\n\ndef shuffle2Array(arr1, arr2, length, axis1=0, axis2=0):\n assert len(arr1) == len(arr2) == length, \\\n \"Error: arr1, arr2, length have different length.\"\n shuffleIndex = np.random.permutation(length)\n shuffledArr1 = arr1[shuffleIndex]\n shuffledArr2 = arr2[shuffleIndex]\n return shuffledArr1, shuffledArr2\n\n\ndef balanceData(npImgs, npKeys):\n # Separate up/down/none key imgs\n upImgs = npImgs[npKeys == 'up']\n downImgs = npImgs[npKeys == 'down']\n noneImgs = npImgs[npKeys == 'none']\n\n # Balance the quantity of data (cut noneImgs to the same amount)\n noneImgs = np.random.permutation(noneImgs)\n noneImgs = noneImgs[: max(len(upImgs), len(downImgs)), :, :]\n\n # Concatenate Imgs, and shuffle\n concatImgs = np.concatenate([upImgs, downImgs, noneImgs])\n concatKeys = np.array(keyIndex[\"up\"] * len(upImgs) +\n keyIndex[\"down\"] * len(downImgs) +\n keyIndex[\"none\"] * len(noneImgs))\n balnImgs, balnKeys = shuffle2Array(concatImgs, concatKeys, len(concatImgs))\n\n return balnImgs, balnKeys\n\n\ndef processImg(Img):\n # Resize image to reduce data volume (half size)\n procsImg = cv2.resize(Img, None, fx=RESIZE_RATE, fy=RESIZE_RATE)\n\n # Threshold (set maxval to 1, to normalize)\n if Img[DAYNIGHT_CHECK_POS] >= 127:\n _, procsImg = cv2.threshold(procsImg, 127, 1, cv2.THRESH_BINARY)\n else:\n # Inverse color when it's night time\n _, procsImg = cv2.threshold(procsImg, 127, 1, cv2.THRESH_BINARY_INV)\n\n # OPTIMIZE: ugly code\n procsImg = procsImg.reshape([1] + list(procsImg.shape) + [1])\n\n return procsImg\n\n\nif __name__ == \"__main__\":\n # with 
open(f\"0_orig_data/{fileNames}.pickle\", 'rb') as file:\n # origData = pickle.load(file)\n # origImgs = origImgs.append(origData[\"origimgs\"], axis=0)\n # keys = origData[\"keys\"]\n\n # Load data\n print(\"Loading data...\")\n origImgs = []\n keys = []\n for fileName in fileNames:\n with open(f\"0_orig_data/{fileName}.pickle\", 'rb') as file:\n origData = pickle.load(file)\n origImgs.extend(origData[\"origImgs\"])\n keys.extend(origData[\"keys\"])\n\n # Change to np.array format\n origImgs = np.array(origImgs)\n keys = np.array(keys)\n print(\"Data loaded!\")\n\n # Process data\n # OPTIMIZE: try to use parallel operation to optimize\n print(\"Processing images...\")\n # OPTIMIZE: somehow ugly\n procs_size = (1,\n round(origImgs.shape[1] * RESIZE_RATE),\n round(origImgs.shape[2] * RESIZE_RATE),\n 1)\n\n counter = 0\n per = round(len(origImgs) / 10)\n procsImgs = np.empty(procs_size)\n for img in origImgs:\n procsImg = processImg(img)\n procsImgs = np.append(procsImgs, procsImg, axis=0)\n\n # Show progress rate\n if counter % per == 0:\n print(f\"{counter//per}%\")\n counter += 1\n\n procsImgs = procsImgs[1:]\n print(\"Images processed!\")\n\n # Normalization\n # procsImgs = (procsImgs) / (procsImgs.max() - procsImgs.min())\n\n # Balance data\n print(\"Balancing data...\")\n procsImgs, keys = balanceData(procsImgs, keys)\n print(\"Data balanced!\")\n\n # Save balanced data\n print(\"Saving data...\")\n saveFileName = f\"{time.strftime('%y%m%d_%H%M%S')}_{len(procsImgs)}\"\n with open(f\"0_balanced_data/{saveFileName}.pickle\", 'wb') as file:\n procsData = {\"procsImgs\": procsImgs, \"keys\": keys}\n pickle.dump(procsData, file)\n print(f\"Saved as {saveFileName}\")\n print(\"Data saved!\")\n","sub_path":"process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"613014781","text":"#! /usr/bin/env python\nimport pygame\n\n#Clase para el Muro\nclass Muro(pygame.sprite.Sprite):\n\tdef __init__(self,posx, posy):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.imagenMuro = pygame.image.load('img/muro.png')\n\t\tself.rect = self.imagenMuro.get_rect()\n\t\tself.visible = False\n\t\tself.colisiones = True\t\t\t\n\t\tself.rect.top = posy\n\t\tself.rect.left = posx\n\t\t\n\tdef dibujar(self,superficie):\n\t\tsuperficie.blit(self.imagenMuro, self.rect)","sub_path":"Practica1/Laberinto/Clases/Muro.py","file_name":"Muro.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"282667951","text":"import sys\nimport subprocess\nimport os\n\nif len(sys.argv) < 2:\n print(\"need to provide a folder arg\")\n sys.exit()\n\npath = os.path.dirname(os.path.abspath(__file__))\nfolder = sys.argv[1]\nprint(\"mpiexec -n 1 \"+path+\"/HDF_INPUT/basic_industries.h5 \"+path+\"/HDF_OUTPUT/\"+folder+\"/\")\nsubprocess.call(\"mpiexec -n 1 \"+path+\"/main_parallel \"+path+\"/HDF_INPUT/basic_industries.h5 \"+path+\"/HDF_OUTPUT/\"+folder+\"/\", shell=True)\n\n","sub_path":"runme.py","file_name":"runme.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"193825577","text":"\"\"\"\nModeling helper functions\n\"\"\"\nimport pandas as pd\nfrom sklearn import metrics\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef evaluate_performance(test_y, test_pred, print_vals=True):\n cnf_matrix = metrics.confusion_matrix(test_y, test_pred)\n \n class_names=['Reg 1', 'Reg 2', 'Reg 3', 'Reg 5']\n cnf_matrix = pd.DataFrame(cnf_matrix, index = class_names,\n columns = class_names)\n \n # plot confusion matrix with heatmap\n sns.heatmap(cnf_matrix, annot=True, cmap=\"YlGnBu\" ,fmt='g')\n plt.tight_layout()\n plt.title('Confusion matrix', y=1.1)\n plt.ylabel('Actual label')\n plt.xlabel('Predicted label')\n \n if print_vals :\n count_misclassified = (test_y != test_pred).sum()\n print('Misclassified samples: {}'.format(count_misclassified))\n accuracy = metrics.accuracy_score(test_y, test_pred)\n print('Classification Report:')\n print(metrics.classification_report(test_y, test_pred)) \n\ndef microaveage_F1(test_y, test_pred):\n return metrics.classification_report(test_y, test_pred, output_dict=True)['weighted avg']['f1-score']","sub_path":"notebooks/Notebook_helpers/modeling_helpers.py","file_name":"modeling_helpers.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"589608270","text":"import random\nfrom statistics import mean\n\n\nclass Player:\n def __init__(self):\n self.scores = []\n self.stop = 0\n\n def roll_die(self):\n return random.randint(1, 6)\n\n\nclass SmartPlayer(Player):\n def __init__(self):\n super().__init__()\n self.stop = 20\n\n\nclass SmartPlayer1(Player):\n def __init__(self):\n super().__init__()\n self.stop = 19\n\n\nclass SmartPlayer2(Player):\n def __init__(self):\n super().__init__()\n self.stop = 21\n\n\nclass Game:\n def __init__(self, turns, player):\n self.player = player\n self.turns = turns\n self.score = 0\n\n def turn(self):\n turn_score = 0\n while True:\n roll = self.player.roll_die()\n if roll == 1:\n turn_score = 0\n break\n else:\n turn_score += roll\n # when made 2 player game will add input here\n if self.player.stop < turn_score:\n break\n self.score += turn_score\n\n def play_game(self):\n while self.turns > 0:\n self.turn()\n self.turns -= 1\n self.player.scores.append(self.score)\n\n\ndef main():\n def_player = Player()\n smart_player = SmartPlayer()\n smart_player_19 = SmartPlayer1()\n #smart_player_21 = SmartPlayer2()\n\n games = 100000\n\n while games > 0:\n Game(7, def_player).play_game()\n Game(7, smart_player).play_game()\n Game(7, smart_player_19).play_game()\n #Game(7, smart_player_21).play_game()\n games -= 1\n\n print(\"\"\"\nDefault Player Mean Score: {}\nSmart Player 20 Mean Score: {}\nSmart Player 19 Mean Score: {}\n \"\"\".format(mean(def_player.scores), mean(smart_player.scores),\n mean(smart_player_19.scores)))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pigsol.py","file_name":"pigsol.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"460825892","text":"#!/usr/bin/env python\n\nimport SocketServer as ss\nimport struct\nimport os\nfrom binascii import hexlify\nimport hashlib\nfrom subprocess import Popen, PIPE\n\n\nclass Handler(ss.StreamRequestHandler):\n\n def handle(self):\n put = self.wfile.write\n sigbytes = 2592\n\n put('Signature verification service, please send a message first\\n')\n msg = self.rfile.readline()[:-1]\n msghash = hashlib.sha256(msg).hexdigest()\n print('verifying sig for %s from %s' % (msg, self.client_address))\n\n put('Now please send a signature, in hex\\n')\n sig = self.rfile.readline()[:-1]\n\n process = Popen(['./verify', msghash, sig], stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate()\n\n if stderr != '':\n put(stderr)\n return\n else:\n put(\"Signature is valid\\n\")\n\n\nclass ReusableTCPServer(ss.ForkingMixIn, ss.TCPServer):\n allow_reuse_address = True\n\nif __name__ == '__main__':\n HOST, PORT = ('0.0.0.0', 2222)\n ss.TCPServer.allow_reuse_address = True\n server = ReusableTCPServer((HOST, PORT), Handler)\n server.serve_forever()\n","sub_path":"serve_verify.py","file_name":"serve_verify.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"108607638","text":"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport numpy as np\nfrom collections import namedtuple, OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.utils.model_zoo as model_zoo\n\nfrom nets.network import Network\nfrom model.config import cfg\nfrom .utils import Adapt2CaffeData\n\n\ndef conv_bn(inp, oup, stride):\n\treturn nn.Sequential(nn.Conv2d(inp, oup, 3, stride, 1, bias=False), nn.BatchNorm2d(oup), nn.ReLU6(inplace=True))\n\n\ndef conv_1x1_bn(inp, oup):\n\treturn nn.Sequential(nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), nn.ReLU6(inplace=True))\n\n\nclass InvertedResidual(nn.Module):\n\tdef __init__(self, inp, oup, stride, expand_ratio):\n\t\tsuper(InvertedResidual, self).__init__()\n\t\tself.stride = stride\n\t\tassert stride in [1, 2]\n\n\t\thidden_dim = round(inp * expand_ratio)\n\t\tself.use_res_connect = self.stride == 1 and inp == oup\n\n\t\tif expand_ratio == 1:\n\t\t\tself.conv = nn.Sequential( # dw\n\t\t\t\tnn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n\t\t\t\tnn.BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True), # pw-linear\n\t\t\t\tnn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), )\n\t\telse:\n\t\t\tself.conv = nn.Sequential( # pw\n\t\t\t\tnn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), nn.BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True),\n\t\t\t\t# dw\n\t\t\t\tnn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n\t\t\t\tnn.BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True), # pw-linear\n\t\t\t\tnn.Conv2d(hidden_dim, oup, 1, 
1, 0, bias=False), nn.BatchNorm2d(oup), )\n\n\tdef forward(self, x):\n\t\tif self.use_res_connect:\n\t\t\treturn x + self.conv(x)\n\t\telse:\n\t\t\treturn self.conv(x)\n\n\n\n\n\nclass MobileNetV2(nn.Module):\n\tdef __init__(self, n_class=1000, input_size=224, width_mult=1.):\n\t\tsuper(MobileNetV2, self).__init__()\n\t\tself.preprocess = Adapt2CaffeData()\n\n\t\tblock = InvertedResidual\n\t\tinput_channel = 32\n\t\tlast_channel = 1280\n\t\tinterverted_residual_setting = [ # t, c, n, s\n\t\t\t[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1], ]\n\n\t\t# building first layer\n\t\tassert input_size % 32 == 0\n\t\tinput_channel = int(input_channel * width_mult)\n\t\tself.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel\n\t\tself.features = [conv_bn(3, input_channel, 2)]\n\t\t# building inverted residual blocks\n\t\tfor t, c, n, s in interverted_residual_setting:\n\t\t\toutput_channel = int(c * width_mult)\n\t\t\tfor i in range(n):\n\t\t\t\tif i == 0:\n\t\t\t\t\tself.features.append(block(input_channel, output_channel, s, expand_ratio=t))\n\t\t\t\telse:\n\t\t\t\t\tself.features.append(block(input_channel, output_channel, 1, expand_ratio=t))\n\t\t\t\tinput_channel = output_channel\n\t\t# building last several layers\n\t\tself.features.append(conv_1x1_bn(input_channel, self.last_channel))\n\t\t# make it nn.Sequential\n\t\tself.features = nn.Sequential(*self.features)\n\n\t\t# building classifier\n\t\tself.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, n_class), )\n\n\t\tself._initialize_weights()\n\n\tdef forward(self, x):\n\t\tx = self.preprocess(x)\n\t\tx = self.features(x)\n\t\tprint(x.size())\n\t\tx = x.mean(3).mean(2)\n\t\tx = self.classifier(x)\n\t\treturn x\n\n\tdef _initialize_weights(self):\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Conv2d):\n\t\t\t\tn = m.kernel_size[0] * m.kernel_size[1] * 
m.out_channels\n\t\t\t\tm.weight.data.normal_(0, math.sqrt(2. / n))\n\t\t\t\tif m.bias is not None:\n\t\t\t\t\tm.bias.data.zero_()\n\t\t\telif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.weight.data.fill_(1)\n\t\t\t\tm.bias.data.zero_()\n\t\t\telif isinstance(m, nn.Linear):\n\t\t\t\tn = m.weight.size(1)\n\t\t\t\tm.weight.data.normal_(0, 0.01)\n\t\t\t\tm.bias.data.zero_()\n\n\ndef mnet_v2(pretrained=False):\n\tmodel = MobileNetV2()\n\tif pretrained:\n\t\turl = \"http://file.lzhu.me/pytorch/models/mobilenet_v2-ecbe2b56.pth.tar\"\n\t\tfp = model_zoo.load_url(url, map_location=\"cpu\")\n\t\tmodel.load_state_dict(fp, strict=False)\n\treturn model\n\nfrom .modules.layers import ConvLayer\n\nclass mobilenetv2(Network):\n\tdef __init__(self):\n\t\tNetwork.__init__(self)\n\t\tself._feat_stride = [16, ]\n\t\tself._feat_compress = [1. / float(self._feat_stride[0]), ]\n\t\tself._depth_multiplier = cfg.MOBILENET.DEPTH_MULTIPLIER\n\t\tself._net_conv_channels = 320\n\t\tself._fc7_channels = 1280\n\n\tdef init_weights(self):\n\t\tdef normal_init(m, mean, stddev, truncated=False):\n\t\t\t\"\"\"\n\t\t\tweight initalizer: truncated normal and random normal.\n\t\t\t\"\"\"\n\t\t\tif m.__class__.__name__.find('Conv') == -1:\n\t\t\t\treturn\n\t\t\tif truncated:\n\t\t\t\tm.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n\t\t\telse:\n\t\t\t\tm.weight.data.normal_(mean, stddev)\n\t\t\tif m.bias is not None: m.bias.data.zero_()\n\n\t\tself.mobilenet.apply(lambda m: normal_init(m, 0, 0.09, True))\n\t\tnormal_init(self.rpn_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\t\tnormal_init(self.rpn_cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\t\tnormal_init(self.rpn_bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\t\tnormal_init(self.cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n\t\tnormal_init(self.bbox_pred_net, 0, 0.001, cfg.TRAIN.TRUNCATED)\n\n\tdef _image_to_head(self):\n\t\tnet_conv = self._layers['head'](self._image)\n\t\tself._act_summaries['conv'] = 
net_conv\n\n\t\treturn net_conv\n\n\tdef _head_to_tail(self, pool5):\n\t\tfc7 = self._layers['tail'](pool5)\n\t\tfc7 = fc7.mean(3).mean(2)\n\t\treturn fc7\n\n\tdef _init_head_tail(self):\n\t\tself.mobilenet = mnet_v2()\n\n\t\t# Fix blocks\n\t\tassert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)\n\t\tfor m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:\n\t\t\tfor p in m.parameters():\n\t\t\t\tp.requires_grad = False\n\n\t\tdef set_bn_fix(m):\n\t\t\tclassname = m.__class__.__name__\n\t\t\tif classname.find('BatchNorm') != -1:\n\t\t\t\tfor p in m.parameters(): p.requires_grad = False\n\n\t\tself.mobilenet.apply(set_bn_fix)\n\n\t\t# Add weight decay\n\t\tdef l2_regularizer(m, wd, regu_depth):\n\t\t\tif isinstance(m, ConvLayer):\n\t\t\t\treturn\n\t\t\tif m.__class__.__name__.find('Conv') != -1:\n\t\t\t\tif regu_depth or m.groups == 1:\n\t\t\t\t\tm.weight.weight_decay = wd\n\t\t\t\telse:\n\t\t\t\t\tm.weight.weight_decay = 0\n\n\t\tself.mobilenet.apply(lambda x: l2_regularizer(x, cfg.MOBILENET.WEIGHT_DECAY, cfg.MOBILENET.REGU_DEPTH))\n\n\t\t# Build mobilenet.\n\t\t# self._layers['head'] = nn.Sequential(*list(self.mobilenet.children())[:12])\n\t\t# self._layers['tail'] = nn.Sequential(*list(self.mobilenet.children())[12:])\n\t\tself._layers['head'] = nn.Sequential(*list(self.mobilenet.features.children())[:-1])\n\t\tself._layers['tail'] = nn.Sequential(*list(self.mobilenet.features.children())[-1:])\n\n\tdef train(self, mode=True):\n\t\t# Override train so that the training mode is set as we want\n\t\tnn.Module.train(self, mode)\n\t\tif mode:\n\t\t\t# Set fixed blocks to be in eval mode (not really doing anything)\n\t\t\tfor m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:\n\t\t\t\tm.eval()\n\n\t\t\t# Set batchnorm always in eval mode during training\n\t\t\tdef set_bn_eval(m):\n\t\t\t\tclassname = m.__class__.__name__\n\t\t\t\tif classname.find('BatchNorm') != -1:\n\t\t\t\t\tm.eval()\n\n\t\t\tself.mobilenet.apply(set_bn_eval)\n\n\tdef 
load_pretrained_cnn_from_url(self, url=\"http://file.lzhu.me/pytorch/models/mobilenet_v2-ecbe2b56.pth.tar\"):\n\t\tfp = model_zoo.load_url(url, map_location=\"cpu\")\n\t\tself.mobilenet.load_state_dict(fp, strict=False)\n\n\tdef load_pretrained_cnn(self, state_dict):\n\t\tDeprecationWarning(\"This API should NOT be called when using MobileNet V2\")\n\t\tprint('Warning: No available pretrained model yet')\n\t\tself.mobilenet.load_state_dict({k: state_dict['features.' + k] for k in list(self.mobilenet.state_dict())})\n\t\t# url = \"http://file.lzhu.me/pytorch/models/mobilenet_v2-ecbe2b56.pth.tar\"\n\t\t# fp = model_zoo.load_url(url, map_location=\"cpu\")\n\t\t# self.mobilenet.load_state_dict(fp)\n","sub_path":"lib/nets/mobilenet_v2.py","file_name":"mobilenet_v2.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"504809313","text":"import hashlib\nimport switchMode as switch\n\nclass main():\n def __init__(self):\n #self.godGuide2 = {}\n self.godGuide = {'avl': {}, 'b': {}, 'bplus': {}, 'dict': {}, 'isam': {}, 'json': {}, 'hash': {}}\n self.guiaModos = {}\n self.listMode = ['avl', 'hash', 'b', 'bplus', 'dict', 'isam', 'json']\n self.listEncoding = ['ascii', 'iso-8859-1', 'utf8']\n\n #---------------------FUNCIONES DE UNIFICACION DE MODOS DE ALMACENAMIENTO----------------------#\n\n # CREAR BASE DE DATOS\n\n def createDatabase(self, database, mode, encoding='ascii'):\n if self.identify(str(database)):\n if self.verifyMode(mode):\n if not self.searchDB2(database):\n if self.verifyEncoding(encoding):\n try:\n self.godGuide[mode][database] = [{}, encoding]\n self.guiaModos[database] = mode\n switch.switchMode(mode).createDatabase(database)\n return 0\n except:\n return 1\n return 4\n return 2\n return 3\n return 1\n\n # ---------------------FUNCIONES DE ADMINISTRACION DEL MODO DE ALMACENAMIENTO----------------------#\n\n # CAMBIA EL MODO DE UNA TABLA\n\n def alterTableMode(self, database, table, mode):\n if self.identify(str(database)):\n if self.verifyMode(mode):\n if self.searchDB2(database):\n if self.searchTB(database, table):\n try:\n if database in switch.switchMode(mode).showDatabases():\n if table not in switch.switchMode(mode).showTables(database):\n for i in self.listMode:\n if database in self.godGuide[i].keys():\n if table in self.godGuide[i][database][0].keys():\n lis = self.godGuide[i][database][0].pop(table)\n self.godGuide[mode][database][0][table] = lis\n tabla = self.extTB(database, table)\n self.delTB(database, table)\n switch.switchMode(mode).createTable(database, table, lis[0])\n for i in tabla:\n switch.switchMode(mode).insert(database, table, i)\n else:\n return 1\n else:\n for i in self.listMode:\n if database in self.godGuide[i].keys():\n if table in self.godGuide[i][database][0].keys():\n encoding = self.godGuide[i][database][1]\n lis = 
self.godGuide[i][database][0].pop(table)\n self.godGuide[mode][database] = [{}, encoding]\n self.godGuide[mode][database][0][table] = lis\n\n #self.createDatabase(database, mode, encoding)\n switch.switchMode(mode).createDatabase(database)\n tabla = self.extTB(database, table)\n self.delTB(database, table)\n switch.switchMode(mode).createTable(database, table, lis[0])\n for i in tabla:\n switch.switchMode(mode).insert(database, table, i)\n return 0\n except:\n return 1\n return 3\n return 2\n return 4\n return 1\n\n # CAMBIA EL MODO DE UNA BASE DE DATOS\n\n def alterDatabaseMode(self, database, mode):\n if self.identify(str(database)):\n if self.verifyMode(mode):\n if self.searchDB2(database):\n try:\n for i in self.listMode:\n if i != mode:\n if database in switch.switchMode(i).showDatabases():\n if len(switch.switchMode(i).showTables(database)) == 0:\n modoA = i\n lis = self.godGuide[modoA].pop(database)\n self.guiaModos[database] = mode\n self.godGuide[mode][database] = lis\n #self.createDatabase(database, mode, lis[1])\n switch.switchMode(mode).createDatabase(database)\n else:\n modoA = i\n self.guiaModos[database] = mode\n for j in switch.switchMode(i).showTables(database):\n self.alterTableMode(database, j, mode)\n self.godGuide[modoA].pop(database)\n #self.godGuide[mode][database] = lis\n switch.switchMode(i).dropDatabase(database)\n return 0\n except:\n return 1\n return 2\n return 4\n return 1\n\n # ---------------------FUNCIONES DE ADMINISTRACION DE INDICES----------------------#\n\n # ---------------------FUNCIONES DE ADMINISTRACION DE LA CODIFICACION----------------------#\n\n def alterDatabaseEncoding(self, dataBase, codi):\n try:\n if codi == '' or codi == None:\n codi = 'ascii'\n leLlave = []\n for i in self.listMode: #para saber si existe la base\n if self.searchDB(dataBase, i):\n if self.verifyEncoding(codi):\n tb = self.showTables(dataBase)\n if tb != []: #saber si tiene o no tablas la base\n for j in tb: #para cod los nombres de las tablas\n tp = 
self.extractTable(dataBase, j) #jalar las tuplas\n if tp != []: #para codificar tuplas\n llave = self.godGuide[i][dataBase][0][j][1]\n for k in range(0,len(tp)):\n leTP = []\n for l in tp[k]:\n #para saber si viene codificado ya\n if type(l) is bytes:\n x = l.decode(self.godGuide[i][dataBase][0][j][2])\n leTP += [str(x).encode(encoding= codi, errors= 'backslashreplace')]\n else:\n leTP += [str(l).encode(encoding= codi, errors= 'backslashreplace')]\n for h in llave:\n leLlave.append(tp[k][h])\n leNewtp = {}\n for n in range(0,len(leTP)):\n leNewtp[n] = leTP[n]\n self.update(dataBase,j,leNewtp,leLlave)\n leLlave = []\n self.godGuide[i][dataBase][0][j][2] = codi\n return 0\n else:\n return 3\n return 2\n except:\n return 1\n\n # ---------------------FUNCIONES DE GENERACION DEL CHECKSUM----------------------#\n\n # GENERA EL CHECKSUM DE TODAS LAS TABLAS DE UNA BASE DE DATOS\n\n def checksumDatabase(self, database, mode):\n modos = ['MD5', 'SHA256']\n tablas = self.showTables(database)\n tuplas = []\n tmp = \"\"\n try:\n if mode not in modos:\n return None\n for i in tablas:\n for j in self.extractTable(database, i):\n tuplas.append(j)\n for i in tuplas:\n for j in i:\n tmp += str(j)\n for i in self.listMode:\n if database in self.godGuide[i].keys():\n encoding = self.godGuide[i][database][1]\n if mode == 'MD5':\n hash = hashlib.md5(tmp.encode(encoding))\n elif mode == 'SHA256':\n hash = hashlib.sha256(tmp.encode(encoding))\n hash = hash.hexdigest()\n print(tmp)\n return hash\n except:\n return None\n\n # GENERA EL CHECKSUM DE UNA TABLA EN ESPECIFICO\n\n def checksumTable(self, database, table, mode):\n modos = ['MD5', 'SHA256']\n tmp = \"\"\n try:\n if mode not in modos:\n return None\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n encoding = self.godGuide[i][j][0][table][2]\n tuplas = self.extractTable(database, table)\n for i in tuplas:\n for j in i:\n tmp += str(j)\n if mode == 'MD5':\n 
hash = hashlib.md5(tmp.encode(encoding))\n elif mode == 'SHA256':\n hash = hashlib.sha256(tmp.encode(encoding))\n hash = hash.hexdigest()\n print(tmp)\n return hash\n except:\n return None\n\n # ---------------------FUNCIONES DE COMPRESION DE DATOS----------------------#\n\n # ---------------------FUNCIONES DE SEGURIDAD----------------------#\n\n # ---------------------FUNCIONES DE GRAFOS----------------------#\n\n #---------------------FUNCIONES BASES DE DATOS (ANTERIORES)----------------------#\n\n # LISTA DE BASES DE DATOS ALMACENADAS\n\n def showDatabases(self):\n re = []\n for i in self.listMode:\n re = re + switch.switchMode(i).showDatabases()\n return re\n\n # CAMBIAR NOMBRE DE UNA BASE DE DATOS\n\n def alterDatabase(self, databaseOld, databaseNew):\n re = 1\n for i in self.listMode:\n if self.searchDB(databaseOld, i):\n for i in self.listMode:\n if not self.searchDB2(databaseNew):\n re = switch.switchMode(i).alterDatabase(databaseOld, databaseNew)\n if re == 0:\n\n ward = self.guiaModos.pop(databaseOld)\n self.guiaModos[databaseNew] = ward\n\n for i in self.listMode:\n if databaseOld in self.godGuide[i].keys():\n ward = self.godGuide[i].pop(databaseOld)\n self.godGuide[i][databaseNew] = ward\n return re\n\n # ELIMINAR BASE DE DATOS\n\n def dropDatabase(self, database):\n re = 1\n for i in self.listMode:\n if self.searchDB(database, i):\n re = switch.switchMode(i).dropDatabase(database)\n if re == 0:\n self.guiaModos.pop(database)\n for i in self.listMode:\n if database in self.godGuide[i].keys():\n self.godGuide[i].pop(database)\n return re\n\n # ---------------------FUNCIONES TABLAS----------------------#\n\n # CREAR TABLA EN UNA DETERMINADA BASE DE DATOS\n\n def createTable(self, database, table, numberColumns):\n re = switch.switchMode(self.guiaModos[database]).createTable(database, table, numberColumns)\n if re == 0:\n mod = self.guiaModos[database]\n self.godGuide[mod][database][0][table] = [numberColumns, None, 
self.godGuide[self.guiaModos[database]][database][1], False]\n return re\n\n # LISTA DE TABLAS AGREGADAS A UNA BASE DE DATOS\n\n def showTables(self, database):\n re = []\n for i in self.listMode:\n if self.searchDB(database, i):\n re = re + switch.switchMode(i).showTables(database)\n return re\n\n # LISTA DE REGISTROS DE UNA TABLA EN UN BASE DE DATOS\n\n def extractTable(self, database, table):\n re = []\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = re + switch.switchMode(i).extractTable(database, table)\n return re\n\n #LISTA REGISTROS EN UN RANGO DE UNA TABLA\n\n def extractRangeTable(self, database, table, columnNumber, lower, upper):\n re = []\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = re + switch.switchMode(i).extractRangeTable(database, table, columnNumber, lower, upper)\n return re\n\n # AGREGAR LISTA DE LLAVES PRIMARIAS A UNA TABLA\n\n def alterAddPK(self, database, table, columns):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterAddPK(database, table, columns)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0][table][1] = columns\n return re\n\n # ELIMINAR LAS LLAVES PRIMARIAS DE UNA TABLA\n\n def alterDropPK(self, database, table):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterDropPK(database, table)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0][table][1] = None\n return re\n\n # CAMBIAR EL NOMBRE DE UNA TABLA\n\n def alterTable(self, database, tableOld, tableNew):\n for i 
in self.listMode:\n if self.searchDB(database, i):\n if tableOld in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterTable(database, tableOld, tableNew)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if tableOld in self.godGuide[i][j][0].keys() and j == database:\n ward = self.godGuide[i][j][0].pop(tableOld)\n self.godGuide[i][j][0][tableNew] = ward\n return re\n\n # AGREGAR UN NUEVO REGISTRO A LAS TABLAS EXISTENTES\n\n def alterAddColumn(self, database, table, default):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterAddColumn(database, table, default)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0][table][0] += 1\n return re\n\n # ELIMINAR UNA COLUMNA ESPECIFICA DE UNA TABLA\n\n def alterDropColumn(self, database, table, columnNumber):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).alterDropColumn(database, table, columnNumber)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0][table][0] -= 1\n return re\n\n # ELIMINAR UNA TABLA DE LA BASE DE DATOS\n\n def dropTable(self, database, table):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n re = switch.switchMode(i).dropTable(database, table)\n if re == 0:\n for i in self.listMode:\n for j in self.godGuide[i].keys():\n if table in self.godGuide[i][j][0].keys() and j == database:\n self.godGuide[i][j][0].pop(table)\n return re\n\n # ---------------------FUNCIONES TUPLAS----------------------#\n\n # AÑADIR REGISTROS A UNA TABLA\n\n def insert(self, database, table, 
register):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).insert(database, table, register)\n\n # CARGA DE REGISTROS MEDIANTE UN CSV\n\n def loadCSV(self, file, database, table):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).loadCSV(file, database, table)\n\n # REGISTRO SEGUN LLAVE PRIMARIA\n\n def extractRow(self, database, table, columns):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).extractRow(database, table, columns)\n\n # MODIFICA UN REGISTRO EN ESPECIFICO\n\n def update(self, database, table, register, columns):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).update(database, table, register, columns)\n\n # ELIMINA UN REGISTRO EN ESPECIFICO\n\n def delete(self, database, table, columns):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).delete(database, table, columns)\n\n # ELIMINA TODOS LOS REGISTROS DE UNA TABLA\n\n def truncate(self, database, table):\n for i in self.listMode:\n if self.searchDB(database, i):\n if table in switch.switchMode(i).showTables(database):\n return switch.switchMode(i).truncate(database, table)\n\n # -------------------------UTILIDADES-------------------------#\n\n def identify(self, id):\n id = str(id)\n if id[0].isalpha():\n return True\n else:\n if id[0].isdigit():\n return False\n return False\n\n def verifyMode(self, mode):\n if mode in self.listMode:\n return True\n return False\n\n def verifyEncoding(self, encoding):\n if encoding in self.listEncoding:\n return True\n return False\n\n def searchDB(self, key, mode):\n if key in 
switch.switchMode(mode).showDatabases():\n return True\n return False\n\n def searchDB2(self, key):\n for i in self.listMode:\n if key in switch.switchMode(i).showDatabases():\n return True\n return False\n\n def searchTB(self, database, table):\n for i in self.listMode:\n for j in switch.switchMode(i).showDatabases():\n if table in switch.switchMode(i).showTables(j):\n return True\n return False\n\n def extTB(self, database, table):\n for i in self.listMode:\n for j in switch.switchMode(i).showDatabases():\n if table in switch.switchMode(i).showTables(j):\n return switch.switchMode(i).extractTable(j, table)\n\n def delTB(self, database, table):\n for i in self.listMode:\n for j in switch.switchMode(i).showDatabases():\n if table in switch.switchMode(i).showTables(j):\n switch.switchMode(i).dropTable(j, table)\n return None\n\n","sub_path":"storage/fase2/team17/storage/mainMode.py","file_name":"mainMode.py","file_ext":"py","file_size_in_byte":20462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"358684093","text":"#!/usr/bin/env python3\n\n\"\"\" Helpful utilities, used by various testcases in testing dlist_node. \"\"\"\n\n\n\n# NOTE: It's not necessary to import the dlist_node file, since we don't actually\n# create any *new* objects in these functions.\n\n\n\ndef dlist_is_consistent(head):\n \"\"\" Returns True if the list appears to be valid (that is, has all the\n proper next/prev pointers, and the head is actually the head of the list)\n\n Note that head is a pointer to what we believe is the head of the list\n (might be none).\n \"\"\"\n\n if head is None:\n # trivially OK!\n return True\n\n if head.prev is not None:\n print(f\"ERROR: The node that we think is the head of the list (the node containing {head.val}) has a non-None prev pointer.\")\n return False\n\n cur = head\n while cur is not None:\n if cur.prev is not None:\n if cur.prev.next is not cur:\n print(\"ERROR: next/prev mismatch detected.\")\n print(f\" current node: id={id(cur)} value: {cur.val}\")\n print(f\" prev node: id={id(cur.prev)} value: {cur.prev.val}\")\n print(f\" prev.next node: id={id(cur.prev.next)} --- value not printed ---\")\n return False\n else:\n if cur is not head:\n print(f\"ERROR: The list node containing {cur.val} is not the head, but its prev link is None.\")\n return False\n\n cur = cur.next\n\n return True\n\n\n\ndef dlist_to_str(dllist):\n \"\"\" convert a doubly-linked list to a string \"\"\"\n if dllist is None:\n return \"None\"\n else:\n cur = dllist \n vals, objs = [], []\n while cur is not None:\n cur_str = str(cur.val)\n if cur in objs:\n vals.append(cur_str+\" <=> ... (to infinity and beyond)\")\n break\n else:\n vals.append(cur_str)\n objs.append(cur)\n cur = cur.next\n\n return \" <=> \".join(vals)\n\n\n","sub_path":"python02/proj09_short/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"81729138","text":"# -*- coding: utf-8 -*-\nimport configparser\nfrom manager.config.NewConfigParser import NewConfigParser\nfrom os import path\ndef get_config(): \n cp = NewConfigParser()\n cp.read(path.join(path.dirname(path.abspath(__file__)), 'manager.conf'), encoding='utf-8')\n config = {}\n for section in cp.sections():\n config[section] = {}\n for option in cp.options(section):\n config[section][option] = cp.get(section, option) \n return config \n","sub_path":"manager/config/GetConfig.py","file_name":"GetConfig.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"462284996","text":"import unittest\n\nimport falcon\nimport falcon.testing as testing\nfrom mock import MagicMock\nfrom mock import patch\n\nfrom meniscus.openstack.common import jsonutils\nfrom meniscus.api.pairing.resources import PairingConfigurationResource\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(WhenTestingPairingConfigurationResource())\n return suite\n\n\nclass WhenTestingPairingConfigurationResource(testing.TestBase):\n def before(self):\n self.configuration = {\n 'pairing_configuration': {\n \"api_secret\": \"ce20a1f3-151b-4302-ad42-52d91349fe8b\",\n \"coordinator_uri\": \"http://localhost:8080/v1\",\n \"personality\": \"worker\"\n }\n }\n self.configuration_bad_secret = {\n 'pairing_configuration': {\n \"api_secret\": \"this is not a uuid\",\n \"coordinator_uri\": \"http://localhost:8080/v1\",\n \"personality\": \"worker\"\n }\n }\n self.configuration_bad_personality = {\n 'pairing_configuration': {\n \"api_secret\": \"ce20a1f3-151b-4302-ad42-52d91349fe8b\",\n \"coordinator_uri\": \"http://localhost:8080/v1\",\n \"personality\": \"invalid_personality\"\n }\n }\n self.resource = PairingConfigurationResource()\n self.test_route = '/v1/pairing/configure'\n self.api.add_route(self.test_route, self.resource)\n\n def test_should_return_400_on_bad_secret(self):\n with patch('meniscus.api.pairing.resources.PairingProcess',\n MagicMock()):\n self.simulate_request(\n self.test_route,\n method='POST',\n headers={\n 'content-type': 'application/json',\n },\n body=jsonutils.dumps(self.configuration_bad_secret))\n self.assertEqual(falcon.HTTP_400, self.srmock.status)\n\n def test_should_return_400_on_bad_personality(self):\n with patch('meniscus.api.pairing.resources.PairingProcess',\n MagicMock()):\n self.simulate_request(\n self.test_route,\n method='POST',\n headers={\n 'content-type': 'application/json',\n },\n body=jsonutils.dumps(self.configuration_bad_personality))\n self.assertEqual(falcon.HTTP_400, self.srmock.status)\n\n 
def test_should_return_200_on_post(self):\n with patch('meniscus.api.pairing.resources.PairingProcess',\n MagicMock()):\n self.simulate_request(\n self.test_route,\n method='POST',\n headers={\n 'content-type': 'application/json',\n },\n body=jsonutils.dumps(self.configuration))\n self.assertEqual(falcon.HTTP_200, self.srmock.status)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"meniscus/tests/api/pairing/resources_test.py","file_name":"resources_test.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"475319257","text":"#!/usr/bin/env python3\n#coding:UTF-8\n\"\"\"#coding=UTF-8 专门为版本2 解决默认编码问题,可以识别中文\"\"\"\n\nimport sys\n\nsys.stdout.write('\\033[32;46;1m__name__ is %s\\n\\033[0m' % __name__)\n\nclass Factoryclass:\n def __init__(self, ph, email):\n self.ph = ph\n self.email = email\n\n def update_ph(self, newph):\n self.ph = newph\n\n def get_ph(self):\n return self.ph\n\nclass BearToy:\n\n #实例化类 产生的一个实例 默认会调用的方法__init__函数\n\n def __init__(self, size, color, ph, email):\n self.size = size\n self.color = color\n self.factory = Factoryclass(ph, email)\n\n def sing(self,song): #self必不可少\n print('lalala...', song)\n \n def update_color(self, newcolor):\n self.color = newcolor\n def get_color(self):\n return self.color\n\nclass NewBearToy(BearToy): #在圆括号中写明从哪个父类继承\n def run(self):\n print('running ----------')\n def sing(self): #self必不可少,子类覆盖父的同名方法\n print('lalala...song....NewBearToy(BearToy)....')\n\n\n\n\nif __name__ == '__main__':\n sys.stdout.write('\\033[31;47;1msys.argv is %s\\n\\033[0m' % sys.argv)\n tidy = BearToy('small', 'orange', 123456, 'xixi@qq.com')\n print(tidy)\n #<__main__.BearToy object at 0x7fbd56db6320>\n print(type(tidy))\n #\n print(tidy.size, tidy.color,sep= ' --- ')\n #small --- orange\n print(tidy.sing('hehehe'))\n #lalala... hehehe\n #None\n print('---------------')\n print(BearToy('larger','brown', 123, 'xx@qq.com'))\n #<__main__.BearToy object at 0x7f7bd14a64e0>\n\n print(type(BearToy('larger','brown', 123, 'xx@qq.com')))\n #\n\n print(BearToy('larger','brown', 123, 'xx@qq.com').sing('newbeartoySing'))\n #lalala... newbeartoySing\n #None\n \n tidy.color = 'red' #不推荐使用这样的用法\n print(tidy.size, tidy.color)\n #small red\n\n tidy.update_color('green')\n\n print(tidy.size, tidy.get_color())\n #small green\n\n\n tidy2 = BearToy('small', 'orange', 1234, 'hiys@163.com')\n\n print(tidy2.factory.get_ph())\n\n b1 = NewBearToy('larger','brown', 123, 'xx@qq.com')\n# b1.sing('yiyiyiyi---')\n #lalala... 
yiyiyiyi---\n b1.run()\n #running ----------\n b1.sing()\n #lalala...song....NewBearToy(BearToy)....\n \n print(BearToy.sing(tidy2,'sssssss---tidy2---')) #很少使用,借用身份运行方法\n #lalala... sssssss---tidy2---\n #None\n \n\n\n","sub_path":"day07/ooptoy.py","file_name":"ooptoy.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"547176906","text":"#!/usr/bin/env python\n#coding:utf-8\nimport commands\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.utils import parseaddr, formataddr\nimport smtplib\n\nclass Send_Messages(object):\n\n def __init__(self):\n self.from_ddr = 'lihongwei549@163.com'\n self.password = 'Lhw17733244915'\n self.smtp_server = 'smtp.163.com'\n\n def _format_addr(self, s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\n def send(self, to_addr, messages):\n to_addr = to_addr\n msg = MIMEText(messages, 'plain', 'utf-8')\n msg['From'] = self._format_addr('EasyMonitor <%s>' % self.from_ddr)\n msg['To'] = self._format_addr('管理员 <%s>' % to_addr)\n msg['Subject'] = Header('服务器状态警告', 'utf-8').encode()\n server = smtplib.SMTP(self.smtp_server, 25)\n server.login(self.from_ddr, self.password)\n server.sendmail(self.from_ddr, [to_addr], msg.as_string())\n server.quit()\n\n\nclass Monitor_ecs(Send_Messages):\n\n def __init__(self):\n super(Monitor_ecs, self).__init__()\n\n def shell_cpu(self):\n shell_command = \"top -b -n 2 |grep 'Cpu(s):' |awk '{print $2}' |sort -rn |tail -1\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n cpu = None\n else:\n cpu = result.split('%')[0]\n return cpu\n\n def shell_gdis(self):\n shell_command = \"df -h |sed -n '2'p |awk '{print $2,$3,$4,$5}'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n gdisk = None\n else:\n Size, Used, Avail, Use = result.split()[0:]\n gdisk = Use.split('%')[0]\n return gdisk\n\n def shell_opt(self):\n shell_command = \"df -h |grep '/u01' |awk '{print $2,$3,$4,$5}'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n disk_u01 = None\n else:\n Size, Used, Avail, Use = result.split()[0:]\n disk_u01 = Use.split('%')[0]\n return disk_u01\n\n def shell_mem(self):\n shell_command = \"free -m |egrep 'buff|Mem' | sed s/Mem:/11\\ Mem:/g |awk '{print $3}' 
|xargs |awk ' { print $3/$2*100 }'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n mem = None\n else:\n mem = result.split()[0]\n return mem\n\n def shell_w(self):\n shell_command = \"w |grep 'load average' |awk '{print $(NF-2) $(NF-1) $NF}'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n load = None\n else:\n five_min, ten_min, fif_min = result.split(',')[0:]\n load = fif_min\n return load\n\n def shell_inode(self):\n shell_command = \"df -i | grep -E '/|u01' | sort -rn -k 5 | head -n 1 | awk '{print $(NF-1)}'\"\n status, result = commands.getstatusoutput(shell_command)\n if status != 0:\n inode = None\n else:\n use = result.split()[0:]\n inode = use[0].split('%')[0]\n return inode\n\ndef main(list, host):\n obj = Monitor_ecs()\n cpu = obj.shell_cpu()\n if cpu != None:\n if float(cpu) > 80:\n messages = host + \":cpu当前为:\" + cpu + \"%\"\n for i in list:\n obj.send(i, messages)\n\n mem = obj.shell_mem()\n if mem != None:\n if float(mem) > 80:\n messages = host + \":mem当前为:\" + mem + \"%\"\n for i in list:\n obj.send(i, messages)\n\n load = obj.shell_w()\n if load != None:\n if float(load) > 15:\n messages = host + \":load当前为:\" + load\n for i in list:\n obj.send(i, messages)\n\n gdisk = obj.shell_gdis()\n if gdisk != None:\n if float(gdisk) > 90:\n messages = host + \":根目录当前使用空间为“\" + gdisk + \"%\"\n for i in list:\n obj.send(i, messages)\n\n disk_u01 = obj.shell_opt()\n if disk_u01 != None:\n if float(disk_u01) > 90:\n messages = host + \":disk_u01当前使用空间为:\" + gdisk + \"%\"\n for i in list:\n obj.send(i, messages)\n\n inode = obj.shell_inode()\n if inode != None:\n if float(inode) > 95:\n messages = host + \":inode当前使用空间为:\" + inode + \"%\"\n for i in list:\n obj.send(i, messages)\n\nif __name__ == \"__main__\":\n\n '''configPath'''\n to_addr_list = [\n 'lihw@cloudcc.com',\n 'zuogc@cloudcc.com',\n ]\n hostname = \"tomcat1\"\n\n '''MAN'''\n main(to_addr_list, 
hostname)\n","sub_path":"PyScripts/monitor_ecs.py","file_name":"monitor_ecs.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"344757453","text":"#!/usr/bin/env python3\nimport numpy as np\nfrom sympy import *\nfrom scipy.special import comb\nfrom itertools import combinations\n\ndef GaussHermitePC(n,p):\n if n==1:\n xi = symbols('xi')\n Hp = Matrix([((1/sqrt(2))**i)*hermite(i, xi/sqrt(2)) for i in range(p+1)])\n psi = Hp\n return psi\n else:\n xi = symbols('xi')\n Hp = Matrix([((1/sqrt(2))**i)*hermite(i, xi/sqrt(2)) for i in range(p+1)])\n xi_num = [symbols('xi'+str(i)) for i in range(1,n+1)]\n Hp_mv = zeros(p+1,n)\n for i in range(n):\n for j in range(p+1):\n Hp_mv[j,i] = Hp[j].subs([(xi,xi_num[i])])\n psi_size = int(comb(n+p,p))\n psi = zeros(psi_size,1)\n index = [np.zeros((1,n),dtype='float32')]\n for i in range(1,p+1):\n numi = np.array(list(combinations(list(range(1,n+i)),n-1)))\n num1 = np.zeros((numi.shape[0],1),dtype='float32')\n num2 = (n+i) + num1\n concat = np.hstack((num1,numi,num2))\n indexi = np.flipud(np.diff(concat,n=1,axis=1))-1\n index = index + indexi.tolist()\n if not np.allclose(np.sum(indexi,axis=1), i *np.ones((int(comb(n+i-1,n-1)),1))):\n print('The sum of each row has to be equal to p-th order')\n return\n index_mat = np.vstack(index)\n for i in range(1, psi_size+1):\n mult_s = 1\n for j in range(n):\n mult_s = mult_s * Hp_mv[int(index_mat[i-1][j]),j]\n psi[i-1] = mult_s\n return psi\n \nif __name__ == \"__main__\":\n psi2 = GaussHermitePC(7,1)\n init_printing()\n print(psi2)\n","sub_path":"src/mstar_guidance/src/stoctrajopt/stoctrajopt/gPC_toolbox/GaussHermitePC.py","file_name":"GaussHermitePC.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"339892128","text":"#!/usr/bin/env python\n\nimport subprocess\n\n\"\"\"\nA ssh based command diapath system\n\"\"\"\nmachines = [ \"10.10.10.28\",\n\"10.10.10.29\",\n\"10.10.10.30\",\n\"10.10.10.31\",\n\"10.10.10.32\"]\n\ncmd = \"uname\"\nfor machine in machines:\n subprocess.call(\"printf '%s OS type is: ';ssh root@%s %s\" % (machine,machine,cmd),shell=True)\n","sub_path":"python/dispath.py","file_name":"dispath.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"626901325","text":"sal = int(input('salario?'))\nimp = 27.0\nwhile imp > 0.:\n imp = input(\"imposto ou (s) para sair\")\n if not imp:\n imp = 27.\n elif imp == 's':\n break\n else:\n imp = float(imp)\n print(\"Valor real: {0}\".format(sal - (sal * (imp * 0.01))))\n","sub_path":"cap3/salario-while.py","file_name":"salario-while.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"357394460","text":"import logging\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, List, Optional\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import QuerySet\nfrom django.template.defaultfilters import truncatechars_html\nfrom django.utils.html import format_html\n\nfrom entities.serializers import EntitySerializer\nfrom modularhistory.constants.strings import EMPTY_STRING\nfrom modularhistory.fields import ArrayField, HistoricDateTimeField, HTMLField\nfrom modularhistory.models import (\n ModelWithComputations,\n ModelWithImages,\n ModelWithRelatedEntities,\n ModelWithRelatedQuotes,\n TaggableModel,\n TypedModel,\n retrieve_or_compute,\n)\nfrom modularhistory.structures import HistoricDateTime as DateTime\n\nif TYPE_CHECKING:\n from entities.models import Categorization\n\nNAME_MAX_LENGTH: int = 100\n\nTRUNCATED_DESCRIPTION_LENGTH: int = 1200\n\nPARTS_OF_SPEECH = (\n ('noun', 'noun'),\n ('adj', 'adjective'),\n ('any', 'noun / adjective'),\n)\n\n\nclass Entity(\n TypedModel,\n TaggableModel,\n ModelWithComputations,\n ModelWithImages,\n ModelWithRelatedQuotes,\n ModelWithRelatedEntities,\n):\n \"\"\"An entity.\"\"\"\n\n name = models.CharField(max_length=NAME_MAX_LENGTH, unique=True)\n unabbreviated_name = models.CharField(\n max_length=NAME_MAX_LENGTH, unique=True, null=True, blank=True\n )\n aliases = ArrayField(\n models.CharField(max_length=NAME_MAX_LENGTH), null=True, blank=True\n )\n birth_date = HistoricDateTimeField(null=True, blank=True)\n death_date = HistoricDateTimeField(null=True, blank=True)\n description = HTMLField(null=True, blank=True, paragraphed=True)\n categories = models.ManyToManyField(\n 'entities.Category',\n through='entities.Categorization',\n related_name='entities',\n blank=True,\n )\n images = models.ManyToManyField(\n 'images.Image',\n through='entities.EntityImage',\n related_name='entities',\n blank=True,\n )\n affiliated_entities = 
models.ManyToManyField(\n 'self', through='entities.Affiliation', blank=True\n )\n\n class Meta:\n \"\"\"\n Meta options for the Entity model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'Entities'\n ordering = ['name']\n\n searchable_fields = ['name', 'aliases', 'description']\n serializer = EntitySerializer\n\n def __str__(self) -> str:\n \"\"\"Return the string representation of the entity.\"\"\"\n return f'{self.name}'\n\n def save(self, *args, **kwargs):\n \"\"\"Save the entity to the database.\"\"\"\n self.clean()\n super().save(*args, **kwargs)\n\n def clean(self):\n \"\"\"Prepare the entity to be saved.\"\"\"\n super().clean()\n if not self.unabbreviated_name:\n self.unabbreviated_name = self.name\n if self.type == 'entities.entity' or not self.type:\n raise ValidationError('Entity must have a type.')\n else:\n # Prevent a RuntimeError when saving a new publication\n self.recast(self.type)\n\n @property\n def has_quotes(self) -> bool:\n \"\"\"Return whether the entity has any attributed quotes.\"\"\"\n return bool(len(self.quotes.all()))\n\n @property\n def name_html(self) -> str:\n \"\"\"Return an HTML string of the entity's name.\"\"\"\n logging.debug(f'Getting name_html for {self}')\n return format_html(\n f'{self.name}'\n )\n\n @property\n def truncated_description(self) -> str:\n \"\"\"Return the entity's description, truncated.\"\"\"\n return format_html(\n truncatechars_html(self.description, TRUNCATED_DESCRIPTION_LENGTH)\n )\n\n def get_categorization(self, date: DateTime) -> Optional['Categorization']:\n \"\"\"Return the most applicable categorization based on the date.\"\"\"\n if not self.categories.exists():\n return None\n categorizations = self.categorizations.all()\n categorizations = (\n categorizations.exclude(date__gt=date) if date else categorizations\n )\n if not len(categorizations):\n categorizations = self.categorizations.all()\n return 
categorizations.order_by('date', 'category__weight').last()\n\n def get_categorizations(\n self, date: Optional[DateTime] = None\n ) -> 'QuerySet[Categorization]':\n \"\"\"Return a list of all applicable categorizations.\"\"\"\n categorizations = (\n self.categorizations.exclude(date__gt=date)\n if date\n else self.categorizations.all()\n )\n return categorizations.select_related('category')\n\n @retrieve_or_compute(attribute_name='categorization_string')\n def get_categorization_string(self, date: Optional[DateTime] = None) -> str:\n \"\"\"Intelligently build a categorization string, like `liberal scholar`.\"\"\"\n categorizations: 'QuerySet[Categorization]' = self.get_categorizations(date)\n if categorizations:\n # Build the string\n categorization_words: List[str] = []\n for part_of_speech in ('noun', 'any', 'adj'):\n pos_categorizations = categorizations.filter(\n category__part_of_speech=part_of_speech\n )\n if pos_categorizations.exists():\n categorization_str = str(\n pos_categorizations.order_by('category__weight', 'date').last()\n )\n words = [\n word\n for word in categorization_str.split(' ')\n if word not in categorization_words\n ]\n categorization_words = words + categorization_words\n # Remove duplicate words\n categorization_words = list(dict.fromkeys(categorization_words))\n return ' '.join(categorization_words)\n return EMPTY_STRING\n\n\nclass Person(Entity):\n \"\"\"A person.\"\"\"\n\n class Meta:\n \"\"\"\n Meta options for the Person model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'People'\n\n\nclass Deity(Entity):\n \"\"\"A deity.\"\"\"\n\n class Meta:\n \"\"\"\n Meta options for the Deity model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'Deities'\n\n\nclass Group(Entity):\n \"\"\"A group of people.\"\"\"\n\n class Meta:\n \"\"\"\n Meta options for the Group model.\n\n See 
https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'Groups'\n\n\nclass Organization(Entity):\n \"\"\"An organization.\"\"\"\n\n class Meta:\n \"\"\"\n Meta options for the Organization model.\n\n See https://docs.djangoproject.com/en/3.1/ref/models/options/#model-meta-options.\n \"\"\"\n\n verbose_name_plural = 'Organizations'\n\n @property\n def founding_date(self) -> datetime:\n \"\"\"Return the date the organization was founded.\"\"\"\n return self.birth_date\n","sub_path":"entities/models/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":7302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"375312052","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\"\"\"Main entry point\n\"\"\"\nfrom pyramid.config import Configurator\nfrom metlog.config import client_from_stream_config\nfrom campaign.resources import Root\nfrom campaign.storage.sql import Storage\nfrom mozsvc.config import load_into_settings\nfrom mozsvc.middlewares import _resolve_name\n\n\ndef get_group(group_name, dictionary):\n if group_name is None:\n return dictionary\n else:\n result = {}\n trim = len(group_name) + 1\n for key in filter(lambda x: x.startswith(group_name), dictionary):\n result[key[trim:]] = dictionary[key]\n return result\n\ndef configure_from_settings(object_name, settings):\n config = dict(settings)\n if 'backend' not in config:\n if '%s.backend' % object_name in config:\n config = get_group(object_name, config)\n cls = _resolve_name(config.pop('backend'))\n return cls(**config)\n\n\ndef main(global_config, **settings):\n load_into_settings(global_config['__file__'], settings)\n config = Configurator(root_factory=Root, settings=settings)\n config.include(\"cornice\")\n config.include(\"pyramid_beaker\")\n config.include(\"mozsvc\")\n config.scan(\"campaign.views\")\n config.registry['storage'] = Storage(config)\n config.registry['auth'] = configure_from_settings('auth',\n settings['config'].get_map('auth'))\n metlog_client = client_from_stream_config(\n open(global_config['__file__'], 'r'),\n 'metlog')\n config.registry['metlog'] = metlog_client\n return config.make_wsgi_app()\n\n\nclass LOG:\n EMERGENCY = 0\n ALERT = 1\n CRITICAL = 2\n ERROR = 3\n WARNING = 4\n NOTICE = 5\n INFORMATIONAL = 6\n DEBUG = 7\n","sub_path":"campaign/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"553209483","text":"from tensorflow.keras.layers import Input, concatenate, Dropout, Activation, MaxPooling2D, Convolution2D, \\\n AveragePooling2D, BatchNormalization, Reshape, Conv2DTranspose, Cropping2D\nfrom tensorflow.keras.models import Model\n\n\"\"\"\nImplementation of Inception Network v4 [Inception Network v4 Paper](http://arxiv.org/pdf/1602.07261v1.pdf) in Keras.\n\"\"\"\nchannel_axis = 3\n\n\ndef conv_block(x, nb_filter, nb_row, nb_col, padding='same', subsample=(1, 1), bias=False):\n x = Convolution2D(filters=nb_filter, kernel_size=(nb_row, nb_col), strides=subsample, padding=padding, use_bias=bias)(x)\n x = BatchNormalization(axis=channel_axis)(x)\n x = Activation('relu')(x)\n return x\n\n\ndef inception_stem(input):\n # Input Shape is 299 x 299 x 3 (th) or 3 x 299 x 299 (th)\n x = conv_block(input, 32, 3, 3, subsample=(2, 2), padding='valid')\n x = conv_block(x, 32, 3, 3, padding='valid')\n x = conv_block(x, 64, 3, 3)\n\n x1 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)\n x2 = conv_block(x, 96, 3, 3, subsample=(2, 2), padding='valid')\n\n x = concatenate([x1, x2], axis=channel_axis)\n\n x1 = conv_block(x, 64, 1, 1)\n x1 = conv_block(x1, 96, 3, 3, padding='valid')\n\n x2 = conv_block(x, 64, 1, 1)\n x2 = conv_block(x2, 64, 1, 7)\n x2 = conv_block(x2, 64, 7, 1)\n x2 = conv_block(x2, 96, 3, 3, padding='valid')\n\n x = concatenate([x1, x2], axis=channel_axis)\n\n x1 = conv_block(x, 192, 3, 3, subsample=(2, 2), padding='valid')\n x2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)\n\n x = concatenate([x1, x2], axis=channel_axis)\n return x\n\n\ndef inception_A(input):\n a1 = conv_block(input, 96, 1, 1)\n\n a2 = conv_block(input, 64, 1, 1)\n a2 = conv_block(a2, 96, 3, 3)\n\n a3 = conv_block(input, 64, 1, 1)\n a3 = conv_block(a3, 96, 3, 3)\n a3 = conv_block(a3, 96, 3, 3)\n\n a4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)\n a4 = conv_block(a4, 96, 1, 1)\n\n m = concatenate([a1, a2, a3, 
a4],axis=channel_axis)\n return m\n\n\ndef inception_B(input):\n b1 = conv_block(input, 384, 1, 1)\n\n b2 = conv_block(input, 192, 1, 1)\n b2 = conv_block(b2, 224, 1, 7)\n b2 = conv_block(b2, 256, 7, 1)\n\n b3 = conv_block(input, 192, 1, 1)\n b3 = conv_block(b3, 192, 7, 1)\n b3 = conv_block(b3, 224, 1, 7)\n b3 = conv_block(b3, 224, 7, 1)\n b3 = conv_block(b3, 256, 1, 7)\n\n b4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)\n b4 = conv_block(b4, 128, 1, 1)\n\n m = concatenate([b1, b2, b3, b4], axis=channel_axis)\n return m\n\n\ndef inception_C(input):\n c1 = conv_block(input, 256, 1, 1)\n\n c2 = conv_block(input, 384, 1, 1)\n c2_1 = conv_block(c2, 256, 1, 3)\n c2_2 = conv_block(c2, 256, 3, 1)\n c2 = concatenate([c2_1, c2_2], axis=channel_axis)\n\n c3 = conv_block(input, 384, 1, 1)\n c3 = conv_block(c3, 448, 3, 1)\n c3 = conv_block(c3, 512, 1, 3)\n c3_1 = conv_block(c3, 256, 1, 3)\n c3_2 = conv_block(c3, 256, 3, 1)\n c3 = concatenate([c3_1, c3_2], axis=channel_axis)\n\n c4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)\n c4 = conv_block(c4, 256, 1, 1)\n\n m = concatenate([c1, c2, c3, c4], axis=channel_axis)\n return m\n\n\ndef reduction_A(input):\n r1 = conv_block(input, 384, 3, 3, subsample=(2, 2), padding='valid')\n\n r2 = conv_block(input, 192, 1, 1)\n r2 = conv_block(r2, 224, 3, 3)\n r2 = conv_block(r2, 256, 3, 3, subsample=(2, 2), padding='valid')\n\n r3 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)\n\n m = concatenate([r1, r2, r3], axis=channel_axis)\n return m\n\n\ndef reduction_B(input):\n r1 = conv_block(input, 192, 1, 1)\n r1 = conv_block(r1, 192, 3, 3, subsample=(2, 2), padding='valid')\n\n r2 = conv_block(input, 256, 1, 1)\n r2 = conv_block(r2, 256, 1, 7)\n r2 = conv_block(r2, 320, 7, 1)\n r2 = conv_block(r2, 320, 3, 3, subsample=(2, 2), padding='valid')\n\n r3 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)\n\n m = concatenate([r1, r2, r3], axis=channel_axis)\n return m\n\n\ndef 
create_model():\n '''\n Creates a inception v4 network\n\n :return: Keras Model with 1 input and 1 output\n '''\n\n init = Input((400, 608, 1))\n\n # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)\n x = inception_stem(init)\n\n # 4 x Inception A\n for i in range(4):\n x = inception_A(x)\n\n # Reduction A\n x = reduction_A(x)\n\n # 7 x Inception B\n for i in range(7):\n x = inception_B(x)\n\n # Reduction B\n x = reduction_B(x)\n\n # 3 x Inception C\n for i in range(3):\n x = inception_C(x)\n\n # Average Pooling\n x = AveragePooling2D((2, 2))(x) # was (8,8)\n\n # Dropout\n x = Dropout(0.8)(x)\n #x = Flatten()(x)\n\n x = Reshape((240, 256, 1))(x)\n x = Conv2DTranspose(filters=1, kernel_size=(2, 2), strides=2, padding='same')(x)\n # output = Conv2D(1, (1, 1), padding=\"same\", activation=None)(uconv1)\n x = Cropping2D(cropping=(139, 105))(x) # crop of (400,304) to (202,302)\n\n model = Model(init, x, name='inception_v4')\n\n return model\n\n\nmodel = create_model()\nmodel.summary()\n","sub_path":"inception_v4_fwi.py","file_name":"inception_v4_fwi.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"227469918","text":"import numpy as np\n\ndef kalman_filter(mu, sig):\n for n, measurement in enumerate(measurements):\n mu_bar = A * mu + B * u\n sig_bar = A * sig * A.transpose()\n\n s = C * sig_bar * C.transpose() + Q\n K = sig_bar * C.transpose() * np.linalg.inv(s)\n\n z = np.matrix([[measurement]])\n mu = mu_bar + K * (z - C * mu_bar)\n sig = (I - K * C) * sig_bar\n return mu, sig\n\nmeasurements = [1, 2, 3, 4, 5]\n\nmu = np.matrix([[0.], [0.]])\nsig = np.matrix([[1000., 0.], [0., 1000.]])\nu = np.matrix([[0.], [0.]])\nA = np.matrix([[1., 1.], [0, 1.]])\nC = np.matrix([[1., 0.]])\nQ = np.matrix([[1.]])\nI = np.eye(2)\nB = np.eye(2)\n\nprint(kalman_filter(mu, sig))","sub_path":"KalmanFilter/kalman_nd.py","file_name":"kalman_nd.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"149126270","text":"# coding=utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport os\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\nlogging.basicConfig(level=logging.DEBUG)\n\nsessionStorage = {}\n\nskill_id = \"caee0d3a-e0ff-4720-a4ac-e45205aee08b\"\ntoken = \"AgAAAAAIOCSpAAT7o82ir0CsuUqWn1L6FO9DXZE\"\n\n\n@app.route(\"/\", methods=['POST'])\ndef main():\n logging.info('Request: %r', request.json)\n\n response = {\n \"version\": request.json['version'],\n \"session\": request.json['session'],\n \"response\": {\n \"end_session\": False\n }\n }\n\n handle_dialog(request.json, response)\n\n logging.info('Response: %r', response)\n\n return json.dumps(\n response,\n ensure_ascii=False,\n indent=2\n )\n\n\ndef handle_dialog(req, res):\n user_id = req['session']['user_id']\n\n url = \"https://ru.meming.world/wiki/Special:Random\"\n page = requests.get(url)\n soup = BeautifulSoup(page.text, \"html.parser\")\n mainText = soup.find_all('h1')[0].get_text()\n images = soup.findAll('img')\n mainImageUrl = \"https://ru.meming.world/\" + images[0]['src']\n\n skillsUrl = 'https://dialogs.yandex.net/api/v1/skills/' + skill_id + '/images'\n headers = {'content-type': 'application/json', 'Authorization': 'OAuth ' + token}\n r = requests.post(skillsUrl, json={\"url\": mainImageUrl}, headers=headers)\n\n if req['session']['new']:\n sessionStorage[user_id] = {\n 'suggests': [\n \"Хочу\",\n \"Не хочу\",\n ]\n }\n\n res['response']['text'] = 'Привет, хочешь мем?'\n res['response']['buttons'] = get_buttons(user_id)\n return\n\n if req['request']['original_utterance'].lower() in [\n 'мемчанский',\n 'мем',\n 'новый мем',\n 'да',\n 'хочу',\n ]:\n cardImages = [{\n \"image_id\": r.json()['image']['id']\n }, {\n \"image_id\": r.json()['image']['id']\n }, {\n \"image_id\": r.json()['image']['id']\n }, {\n \"image_id\": r.json()['image']['id']\n }, {\n \"image_id\": 
r.json()['image']['id']\n }]\n\n res['response']['text'] = ''\n # res['response']['card'] = {}\n # res['response']['card']['type'] = 'BigImage'\n # res['response']['card']['image_id'] = r.json()['image']['id']\n # res['response']['card']['title'] = mainText\n res['response']['card'] = {}\n res['response']['card']['type'] = 'ItemsList'\n res['response']['card']['items'] = cardImages\n return\n\n res['response']['text'] = ''\n res['response']['buttons'] = get_buttons(user_id)\n return\n\n\n# Функция возвращает две подсказки для ответа.\ndef get_buttons(user_id):\n session = sessionStorage[user_id]\n\n # Выбираем две первые подсказки из массива.\n suggests = [\n {'title': suggest, 'hide': True}\n for suggest in session['suggests'][:2]\n ]\n\n # Убираем первую подсказку, чтобы подсказки менялись каждый раз.\n session['suggests'] = session['suggests'][1:]\n sessionStorage[user_id] = session\n\n suggests.append({\n \"title\": \"Ссылочка\",\n \"url\": \"https://market.yandex.ru/search?text=слон\",\n \"hide\": True\n })\n\n return suggests\n\n\napp.run(host=\"0.0.0.0\", port=int(os.environ.get('PORT', 5000)))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"363335347","text":"import os\nimport codecs\nfrom Downloader import Downloader\n\nnl = '\\r\\n'\ndoc_help = \"下载的小说文件及配置文件在 ../books 目录下。\" + nl + \\\n \"支持导出文件格式: txt,epub\" + nl + \\\n \"图片章节可通过修改配置文件中的area_width,font,font_size,bg_color_name,text_color_name实现不同文字效果\" + nl + \\\n \"area_width:图片宽度; 默认:816\" + nl + \\\n \"font:字体; 默认:undefined\" + nl + \\\n \"font_size:字体大小; 默认:14\" + nl + \\\n \"bg_color_name:背景颜色; 默认:default; 可用设置:default,green,blue,white,gray,pink,night;\" + nl + \\\n \"text_color_name:文字颜色; 默认:default; 可用设置:default,green,blue,white,gray,pink,night;\"\n\n\ndef str_mid(string: str, left: str, right: str, start=None, end=None):\n pos1 = string.find(left, start, end)\n if pos1 > -1:\n pos2 = string.find(right, pos1 + len(left), end)\n if pos2 > -1:\n return string[pos1 + len(left): pos2]\n return ''\n\n\nprint(\"请先登录你的欢乐书客帐号,之后得到一些Cookies并输入程序。\")\nprint(\"若不登录则直接留空所有Cookies,若输入del则清除已保存的Cookies。\")\n\nlogin_token = \"\"\nreader_id = \"\"\narea_width = \"816\"\nfont = \"undefined\"\nfont_size = \"14\"\nbg_color_name = \"default\"\ntext_color_name = \"default\"\nif not os.path.isdir(os.getcwd() + \"/../books\"):\n os.makedirs(os.getcwd() + \"/../books\")\nif os.path.isfile(os.getcwd() + \"/../books/hbookercrawler.cfg\"):\n cfg_file = codecs.open(os.getcwd() + \"/../books/hbookercrawler.cfg\", 'r', 'utf-8')\n for line in cfg_file.readlines():\n if line.startswith(\"login_token=\"):\n login_token = str_mid(line, 'login_token=\"', '\"')\n elif line.startswith(\"reader_id=\"):\n reader_id = str_mid(line, 'reader_id=\"', '\"')\n elif line.startswith(\"area_width=\"):\n area_width = str_mid(line, 'area_width=\"', '\"')\n elif line.startswith(\"font=\"):\n font = str_mid(line, 'font=\"', '\"')\n elif line.startswith(\"font_size=\"):\n font_size = str_mid(line, 'font_size=\"', '\"')\n elif line.startswith(\"bg_color_name=\"):\n bg_color_name = str_mid(line, 'bg_color_name=\"', '\"')\n elif line.startswith(\"text_color_name=\"):\n text_color_name 
= str_mid(line, 'text_color_name=\"', '\"')\n cfg_file.close()\n\nlogin_token = input('Cookie: login_token(默认:\"' + login_token + '\")=') or login_token\nreader_id = input('Cookie: reader_id(默认:\"' + reader_id + '\")=') or reader_id\n\nif reader_id.lower().startswith('del') or login_token.lower().startswith('del'):\n reader_id = \"\"\n login_token = \"\"\n print(\"已清除Cookies!\")\n\nwith codecs.open(os.getcwd() + \"/../books/hbookercrawler.cfg\", 'w', 'utf-8') as cfg_file:\n cfg_file.write('login_token=\"' + login_token + '\"' + nl)\n cfg_file.write('reader_id=\"' + reader_id + '\"' + nl)\n cfg_file.write('area_width=\"' + area_width + '\"' + nl)\n cfg_file.write('font=\"' + font + '\"' + nl)\n cfg_file.write('font_size=\"' + font_size + '\"' + nl)\n cfg_file.write('bg_color_name=\"' + bg_color_name + '\"' + nl)\n cfg_file.write('text_color_name=\"' + text_color_name + '\"')\ndel cfg_file\n\ndl = Downloader(login_token, reader_id)\ndl.area_width = area_width\ndl.font = font\ndl.font_size = font_size\ndl.bg_color_name = bg_color_name\ndl.text_color_name = text_color_name\ndel login_token\ndel reader_id\ndel area_width\ndel font\ndel font_size\ndel bg_color_name\ndel text_color_name\n\nworking_dir = os.getcwd() + \"/../books\"\n\n\ndef select_chapter(book, skip_input):\n try:\n while True:\n while True:\n try:\n if skip_input:\n chapter_start = book[\"last\"] + 1\n chapter_end = len(book[\"chapter\"])\n else:\n chapter_start = int(input(\"输入开始章节编号(留空将自动寻找):\") or book[\"last\"] + 1)\n chapter_end = int(input(\"输入结束章节编号(留空将自动寻找):\") or len(book[\"chapter\"]))\n break\n except ValueError:\n continue\n if chapter_start < 1:\n chapter_start = 1\n if chapter_start > len(book[\"chapter\"]):\n if skip_input:\n print(\"小说暂无更新...\")\n else:\n input(\"小说暂无更新...\")\n break\n if chapter_start <= chapter_end:\n print(\"开始章节编号:\", chapter_start,\n \"chapter_id:\", book[\"chapter\"][chapter_start - 1][0],\n \"标题:\", book[\"chapter\"][chapter_start - 1][1])\n print(\"结束章节编号:\", 
chapter_end,\n \"chapter_id:\", book[\"chapter\"][chapter_end - 1][0],\n \"标题:\", book[\"chapter\"][chapter_end - 1][1])\n while True:\n if skip_input:\n return {\"start\": chapter_start, \"end\": chapter_end}\n else:\n confirm = input(\"确定从这个位置下载吗(回车确认,n:重新输入章节编号,q:取消下载):\").lower()\n if not confirm or confirm.startswith('y'):\n return {\"start\": chapter_start, \"end\": chapter_end}\n elif confirm.startswith('n'):\n break\n elif confirm.startswith('q'):\n return None\n else:\n if skip_input:\n return None\n else:\n print(\"输入无效:\", \"开始章节编号\", chapter_start, \"不能大于\", \"结束章节编号\", chapter_end)\n except (KeyboardInterrupt, InterruptedError):\n print(nl, \"已取消输入章节编号\")\n except Exception as e:\n print(\"[ERROR]\", e)\n print(\"读取章节编号时出错\")\n input(\"按下回车键继续...\")\n return None\n\n\ndef selected_book(_book_id, skip_input):\n dl.skip = skip_input\n book = dl.get_book(_book_id)\n book = dl.check_book(book)\n book = dl.fix_book(book)\n selected = select_chapter(book, skip_input)\n if selected:\n dl.download(book, selected)\n\n\ndef update_downloaded():\n try:\n book_list = list()\n for title in os.listdir(working_dir):\n cfg_path = working_dir + '/' + title + '/' + title + '.cfg'\n if os.path.isfile(cfg_path):\n with codecs.open(cfg_path, 'r', 'utf-8') as file:\n file_lines = file.readlines()\n for _line in file_lines:\n if _line.startswith('book_id='):\n book_list.append(str_mid(_line, 'book_id=\"', '\"'))\n for _book_id in book_list:\n selected_book(_book_id, True)\n except (KeyboardInterrupt, InterruptedError):\n print(nl, \"已取消更新全部小说\")\n except Exception as e:\n print(\"[ERROR]\", e)\n print(\"更新小说时出错\")\n input(\"按下回车键继续...\")\n\n\ndef solve_input(inputs):\n if inputs.startswith('q'):\n exit()\n return True\n elif inputs.startswith('h'):\n print(doc_help)\n return True\n elif inputs.startswith('ud'):\n update_downloaded()\n return True\n elif inputs.startswith('v'):\n dl.vip = not dl.vip\n if dl.vip:\n print(\"已设置下载VIP章节\")\n else:\n print(\"已设置跳过VIP章节\")\n return 
True\n return False\n\n\ndl.get_bookshelf()\n\nif dl.nickname != '${NoName}':\n while True:\n for bookshelf_info in dl.bookshelf:\n print(\"编号:\", bookshelf_info[0], \"book_id:\", bookshelf_info[1], \"书名:\", bookshelf_info[2])\n while True:\n try:\n book_id = input(\"输入小说编号或小说id(book_id)(q:退出,h:帮助,ud:更新已下载的小说,ua:更新书架中的小说,v:设置是否下载VIP章节):\").lower()\n if not solve_input(book_id):\n if book_id.startswith(\"ua\"):\n for _bookshelf_info in dl.bookshelf:\n selected_book(_bookshelf_info[1], True)\n if 0 < int(book_id) <= len(dl.bookshelf):\n book_id = dl.bookshelf[int(book_id) - 1][1]\n selected_book(book_id, False)\n break\n except ValueError:\n continue\nelse:\n while True:\n book_id = input(\"输入小说id(book_id)(q:退出,h:帮助,u:更新已下载的小说,v:设置是否下载VIP章节):\").lower()\n if not solve_input(book_id):\n selected_book(book_id, False)\n","sub_path":"HbookerCrawler.py","file_name":"HbookerCrawler.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"78197126","text":"import pprint as pp\r\nimport contextlib\r\nimport pandas as pd\r\nfrom savReaderWriter import SavReader as sr, Generic as gn\r\nfrom sqlalchemy import create_engine\r\n\r\nmydb = create_engine('mssql+pymssql://Emeatristsql/{}'.format(\"SSIVRDATA\"))\r\n\"\"\"\r\n select * from SPSS_Project_List\r\n\r\n select * from SPSS_Var_List\r\n\r\n\tselect * from spps_varlist_temp\r\n\r\n\tselect * from spps_desclist_temp\r\n \r\n\"\"\"\r\ndef wrt_valueLabels(args):\r\n \r\n args[\"Values\"] =args.index.astype(int)\r\n # print(args)\r\n df = pd.melt(args,id_vars=\"Values\",var_name=\"ColumnName\", value_name=\"ColumnValue\")\r\n df = df[df[\"ColumnValue\"].notnull()]\r\n df[\"ColumnValue\"]=df[\"ColumnValue\"].str.decode('cp1254')\r\n df[\"ColumnName\"]=df[\"ColumnName\"].str.decode('cp1254')\r\n # print(df)\r\n return(df)\r\n\r\n\r\ndef wrt_otherVar(args,kwargs):\r\n mydict = {\"varLabels\":\"Qtext\",\"formats\":\"Qformat\",\"varTypes\":\"QTypes\",\"varNames\":\"Qt\",}\r\n t = mydict[(\"{}\".format(kwargs))]\r\n\r\n args.rename(index=str, columns={0: t },inplace=True)\r\n args.reset_index(inplace=True)\r\n args.rename(index=str, columns={\"index\": \"Key\"},inplace=True)\r\n \r\n if t != \"QTypes\" and t != \"Qt\" :\r\n args[t]=args[t].str.decode('cp1254')\r\n \r\n # args.to_csv(r\"\\\\Emeatristsql\\IMP\\PythonScripts\\Create_Spss\\{}.csv\".format(t),sep=\";\",header=True)\r\n # print(args)\r\n return(args)\r\n\r\n\r\ndef insert_varlist(args):\r\n args[\"Active\"] = int(1)\r\n args[\"Type\"] = \"Numeric\"\r\n args[\"Sysmiss\"] = int(0)\r\n args[[\"Active\", \"ColumnName\", \"Type\", \"Values\", \"ColumnValue\", \"Sysmiss\"]]\r\n \r\n # print(args)\r\n args.to_sql(name=\"spps_varlist_temp\", con=mydb, if_exists=\"replace\",\r\n index=False, chunksize=1000)\r\n return(args)\r\n\r\ndef main(file_path):\r\n numVars, nCases, varNames, varTypes, printTypesFile, printTypeLabels, varWids = \\\r\n sr(file_path, verbose=True).getSavFileInfo()\r\n \r\n 
print(sr(file_path).getFileReport())\r\n # print(sr(file_path).getSavFileInfo())\r\n\r\n valueLabels = pd.DataFrame.from_dict(varWids)\r\n valueLabels = wrt_valueLabels(valueLabels)\r\n # valueLabels sav icinden alindi.\r\n\r\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n \r\n varLabels = pd.DataFrame.from_dict(printTypeLabels, orient='index')\r\n varLabels = wrt_otherVar(varLabels,\"varLabels\") \r\n\r\n formats = pd.DataFrame.from_dict(printTypesFile, orient='index')\r\n formats = wrt_otherVar(formats,\"formats\")\r\n formats[\"Qformat\"] = formats[\"Qformat\"].apply(\r\n lambda x: \"String\" if x.startswith('A') else \"Numeric\")\r\n\r\n varTypes = pd.DataFrame.from_dict(varTypes, orient='index')\r\n varTypes = wrt_otherVar(varTypes,\"varTypes\")\r\n\r\n varNames = pd.DataFrame.from_dict(varNames)\r\n varNames = wrt_otherVar(varNames,\"varNames\")\r\n\r\n # varLabels üzerinden merge işlemleri devam edecek.\r\n result = pd.merge(varLabels, varTypes, on=[\"Key\"], how=\"inner\")\r\n result.index = result.index.map(str)\r\n result = pd.merge(result, varNames, right_index=True, left_index=True)\r\n result = pd.merge(result, formats, right_index=True, left_index=True)\r\n result = result[[\"Qt\", \"Qtext\", \"QTypes\", \"Qformat\"]]\r\n # result = result.merge(varNames, left_index=True, right_on='Qt')\r\n result[\"Qt\"]=result[\"Qt\"].str.decode('cp1254')\r\n\r\n # print(result)\r\n result.to_sql(name=\"spps_desclist_temp\", con=mydb, if_exists=\"replace\",\r\n index=False, chunksize=1000)\r\n # print(valueLabels)\r\n insert_varlist(valueLabels)\r\n print(\"Değişkenler tablolara yazdırıldı...\")\r\n\r\nsavFilePath = r\".\\085048_2_SA_C.sav\"\r\n\r\nif __name__ == \"__main__\":\r\n main(savFilePath)\r\n","sub_path":"readSav.py","file_name":"readSav.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"165951324","text":"\"\"\" Analyzes the word frequencies in a book downloaded from\nProject Gutenberg \"\"\"\n\nimport string\nfrom heapq import nlargest\n\n\ndef get_word_list(file_name):\n \"\"\" Reads the specified project Gutenberg book. Header comments,\n punctuation, and whitespace are stripped away. The function\n returns a list of the words used in the book as a list.\n All words are converted to lower case.\n \"\"\"\n\n #strips out header comments\n f = open(file_name, 'r')\n lines = f.readlines()\n curr_line = 0\n while lines[curr_line].find('POIROT EXPLAINS') == -1:\n curr_line += 1\n lines = lines[curr_line+1:]\n\n #the list of all the words in the text\n word_list = []\n\n #strips out whitespace, punctuation, and makes lowercase\n #makes into a list of words\n for line in lines:\n words = line.split()\n for word in words:\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n word_list.append(word)\n\n return word_list\n\n\ndef get_top_n_words(word_list, n):\n \"\"\" Takes a list of words as input and returns a list of the n most frequently\n occurring words ordered from most to least frequently occurring.\n\n word_list: a list of words (assumed to all be in lower case with no\n punctuation\n n: the number of words to return\n returns: a list of n most frequently occurring words ordered from most\n frequently to least frequently occurring\n \"\"\"\n word_counts = dict()\n\n for word in word_list:\n if word not in word_counts:\n word_counts[word] = 1\n else:\n word_counts[word] += 1\n\n topn = []\n\n for word in nlargest(n, word_counts, key=word_counts.get):\n topn.append(word)\n\n return topn\n\nif __name__ == \"__main__\":\n print(\"Running WordFrequency Toolbox\")\n print(string.punctuation)\n\n #code that calls two functions\n wordlist = get_word_list('MysteriousAffair.txt')\n print(get_top_n_words(wordlist, 
100))\n","sub_path":"frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"13662679","text":"import feedparser\nfrom strip_html import *\n\nd = feedparser.parse(\"http://the-rinse.com/feed\")\n\nif d.feed.has_key('generator') \\\n and d.feed.generator == \"blog.myspace.com\":\n for entry in d.entries:\n title = entry.title\n desc = entry.description\n date = entry.date\n id = entry.link\n content = entry.content[0].value\nelse:\n for entry in d.entries:\n title = entry.title\n desc = entry.description\n date = entry.date\n id = entry.id\n content = entry.content[0].value\n","sub_path":"feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"590764378","text":"\nfrom socket import *\nimport RPi.GPIO as GPIO\nimport time\nimport sys\n\nhost = \"\"\nport = 13000\nbuf = 1024\naddr = (host, port)\nUDPSock = socket(AF_INET, SOCK_DGRAM)\nUDPSock.bind(addr)\n\nservoPIN = 17\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(servoPIN, GPIO.OUT)\np = GPIO.PWM(servoPIN, 50) # GPIO 17 for PWM with 50Hz\np.start(2.5) # Initialization\n\nprint(\"Waiting to receive messages...\")\n\nwhile True:\n (data, addr) = UDPSock.recvfrom(buf)\n print(\"Received message: \" + data)\n if data == \"left\":\n p.ChangeDutyCycle(2.5)\n time.sleep(0.5)\n elif data == \"mid\":\n p.ChangeDutyCycle(5)\n time.sleep(0.5)\n elif data == \"right\":\n p.ChangeDutyCycle(7.5)\n time.sleep(0.5)\n \n if data == \"exit\":\n break\n\nUDPSock.close()\np.stop()\nGPIO.cleanup()\nsys.exit(1)\n","sub_path":"2_server_udp.py","file_name":"2_server_udp.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"469379580","text":"A = []\nB = []\nC = []\nD = []\nE = []\nMax = 10\n\ndef preencherA ():\n for item in range(0,Max):\n PerguntaA = int(input(\" Escolha os valores para A: \"))\n A.append(PerguntaA)\n\ndef preencherB ():\n for item in range(0,Max):\n PerguntaB = int(input(\"Escolha os valores para B: \"))\n B.append(PerguntaB)\n\ndef preencherC ():\n for itemA in A:\n C.append(itemA)\n for itemB in B:\n C.append(itemB)\n\ndef preencherD ():\n for itemA in A:\n if itemA not in B:\n D.append(itemA)\n\ndef preencherE ():\n for itemA in A:\n if itemA in B:\n if itemA not in E:\n E.append(itemA)\n for itemB in B:\n if itemB not in E:\n E.append(itemB)\n\npreencherA()\npreencherB()\npreencherC()\npreencherD()\npreencherE()\n\nprint(A)\nprint(B)\nprint(C)\nprint(D)\nprint(E)","sub_path":"FichasPraticas/Ficha6.ex4.py","file_name":"Ficha6.ex4.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"629956330","text":"# coding=utf-8\n__author__ = 'huydq17'\n\nimport time\nfrom flask import current_app\nfrom datetime import datetime\n\n\ndef to_string(time_from, time_to):\n if (time_from.day, time_from.month, time_from.year) == (time_to.day, time_to.month, time_to.year):\n return '{0}/{1}/{2}'.format(time_from.day, time_from.month, time_from.year)\n return '{0}/{1}/{2} - {3}/{4}/{5}'.format(time_from.day, time_from.month, time_from.year,\n time_to.day, time_to.month, time_to.year)\n\n\nclass DateTimeWrapper(object):\n def __init__(self):\n # Timestamp is in millisecond\n pass\n\n @staticmethod\n def curr_timestamp(unit='second'):\n if unit == 'millisecond':\n return int(time.time()) * 1000\n return int(time.time())\n\n @staticmethod\n def to_datetime(timestamp, fmt='%H:%M:%S %d-%m-%Y'):\n \"\"\"\n Convert timestamp in second to datetime readable format\n :param timestamp:\n :param fmt:\n :return:\n \"\"\"\n try:\n return datetime.fromtimestamp(int(timestamp)).strftime(fmt)\n except Exception as e:\n current_app.logger.error('Error when convert timestamp to datetime: {0}'.format(str(e)))\n return timestamp\n\n @staticmethod\n def to_timestamp(str_datetime, fmt='%H:%M:%S %d-%m-%Y'):\n \"\"\"\n Convert datetime in string with valid format to timestamp\n \"\"\"\n timestamp = time.mktime(datetime.strptime(str_datetime, fmt).timetuple())\n return int(timestamp)\n\n @staticmethod\n def datetime_to_timestamp(datetime_obj, unit='second'):\n \"\"\"\n Convert python datetime object to timestamp in unit (second or millisecond)\n \"\"\"\n delta = 1000 if unit == 'millisecond' else 1\n return int(time.mktime(datetime_obj.timetuple()) * delta)\n\n @staticmethod\n def rfc3339_to_timestamp(rfc_time, fmt='%Y-%m-%dT%H:%M:%S.%fZ'):\n \"\"\"\n Convert UTC time in RFC 3339 format to timestamp\n Example: 2017-07-19T03:20:13.801Z -> 3h 20m 13s (UTC time) or 10h 20m 13s in GMT+7 time\n :param rfc_time:\n :param fmt\n :return:\n \"\"\"\n utc_dt = datetime.strptime(rfc_time, 
fmt)\n\n # Convert UTC datetime to seconds since the\n timestamp = (utc_dt - datetime(1970, 1, 1)).total_seconds()\n return int(timestamp)\n\n @staticmethod\n def format_timestamp(timestamp, unit='second'):\n \"\"\"\n Convert timestamp in second (or millisecond) to human readable time\n Example: timestamp = 3661, unit = second -> Readable time = 1h 1m 1s\n \"\"\"\n if timestamp == 'N/A':\n return timestamp\n\n seconds = timestamp\n if unit == 'millisecond': # timestamp in millisecond\n seconds = int(timestamp / 1000)\n days = int(seconds / (24 * 3600))\n tmp = seconds - days * 24 * 3600\n hours = int(tmp / 3600)\n tmp = seconds - days * 24 * 3600 - hours * 3600\n minutes = int(tmp / 60)\n seconds = seconds - days * 24 * 3600 - hours * 3600 - minutes * 60\n\n result = \"0\"\n if days > 0:\n result = str(days) + \" ngày \" + str(hours) + \" giờ \" + str(minutes) + \" phút \" + str(seconds) + \" giây\"\n elif hours > 0:\n result = str(hours) + \" giờ \" + str(minutes) + \" phút \" + str(seconds) + \" giây\"\n elif minutes > 0:\n result = str(minutes) + \" phút \" + str(seconds) + \" giây\"\n elif seconds > 0:\n result = str(seconds) + \" giây\"\n return result\n","sub_path":"server_worker_api/worker/app/utils/datetimeutils.py","file_name":"datetimeutils.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"392878078","text":"\nimport random\n\nour = 'zhuhaiwenluobiyu'\nnames = ''\nfor s in our:\n if s not in names:\n names += s\n\nend = []\nfor n in range(10):\n name = ''\n length = random.randint(10,20)\n for i in range(length):\n k = names[random.randint(0,len(names)-1)]\n name += k\n end.append(name)\nprint(end)\n","sub_path":"name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"375855126","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.parse import quote\r\nimport parse\r\nclass Link:\r\n def __init__(self):\r\n pass\r\n\r\n def enter_the_inf(self):\r\n print(\"Потрібно ввести назву міста спочатку на російській потім на англійській, нажміть на enter ще раз після першого вводу\")\r\n x = str(input((\"Введіть назву міста:\")))\r\n return x\r\n def poiskpersSin(self, nick):\r\n geourl = \"https://ua.sinoptik.ua/{0}\".format(quote(nick))\r\n return geourl\r\n def poiskpersMet(self, nick):\r\n geourl = \"https://www.meteoprog.ua/ru/{0}\".format(quote(nick))\r\n return geourl\r\n\r\nclass Request:\r\n HEADERS = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (HTML, like Gecko) '\r\n 'Chrome/88.0.4324.190 Safari/537.36',\r\n 'accept': '*/*'}\r\n def __init__(self, URL1):\r\n self.URL1 = URL1\r\n\r\n def get_html(self, params=None):\r\n r = requests.get(self.URL1, headers=self.HEADERS, params=params)\r\n return r.text\r\n\r\nclass Weather_father():\r\n def __init__(self, html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max):\r\n self.html = html\r\n self.mainClass = mainClass\r\n self.mainNextDay = mainNextDay\r\n self.tabsContentInner = tabsContentInner\r\n self.temperature = temperature\r\n self.pClassOne = pClassOne\r\n self.pClassTwo = pClassTwo\r\n self.pClassThree = pClassThree\r\n self.min = min\r\n self.max = max\r\n def start_gettingSinoptik(self):\r\n soup = BeautifulSoup(self.html, 'html.parser')\r\n # Ищет в этом классе\r\n items = soup.find_all('div', class_=self.mainClass)\r\n weather1 = []\r\n weather2 = []\r\n weather3 = []\r\n weatherNextDay = []\r\n for item in items:\r\n weather1.append(dict(\r\n title=item.find('p', class_= self.pClassOne).get_text(),\r\n date1=item.find('p', class_= self.pClassTwo).get_text(),\r\n date2=item.find('p', class_= self.pClassThree).get_text(),\r\n temp=item.find('div', class_= 
self.min).get_text(),\r\n temp2=item.find('div', class_= self.max).get_text(),\r\n ))\r\n itemsNextDay = soup.find_all('div', id=self.mainNextDay)\r\n for item in itemsNextDay:\r\n weatherNextDay.append(dict(\r\n title=item.find('a', class_=self.pClassOne).get_text(),\r\n date1=item.find('p', class_=self.pClassTwo).get_text(),\r\n date2=item.find('p', class_=self.pClassThree).get_text(),\r\n temp=item.find('div', class_=self.min).get_text(),\r\n temp2=item.find('div', class_=self.max).get_text(),\r\n ))\r\n\r\n # print(weatherNextDay)\r\n item2 = soup.find_all('div', class_=self.tabsContentInner)\r\n\r\n for item in item2:\r\n weather2.append(\r\n dict(vol1=item.find('td', class_='p1').get_text(), vol2=item.find('td', class_='p2').get_text(),\r\n vol3=item.find('td', class_='p3').get_text(), vol4=item.find('td', class_='p4').get_text(),\r\n vol5=item.find('td', class_='p5').get_text(), vol6=item.find('td', class_='p6').get_text(),\r\n vol7=item.find('td', class_='p7').get_text(),\r\n vol8=item.find('td', class_='p8').get_text()))\r\n\r\n item3 = soup.find_all('tr', class_=self.temperature)\r\n\r\n for item in item3:\r\n weather3.append(\r\n dict(vol1=item.find('td', class_='p1').get_text(), vol2=item.find('td', class_='p2').get_text(),\r\n vol3=item.find('td', class_='p3').get_text(), vol4=item.find('td', class_='p4').get_text(),\r\n vol5=item.find('td', class_='p5').get_text(), vol6=item.find('td', class_='p6').get_text(),\r\n vol7=item.find('td', class_='p7').get_text(),\r\n vol8=item.find('td', class_='p8').get_text()))\r\n return [weather1, weather2, weather3, weatherNextDay]\r\n\r\n def start_gettingMeteoprog(self):\r\n soup = BeautifulSoup(self.html, 'html.parser')\r\n items = soup.find_all(attrs={'data-daynumber': '0'})\r\n weather1 = []\r\n\r\n for item in items:\r\n weather1.append(dict(\r\n title=item.find('div', class_=self.tabsContentInner).get_text(),\r\n date1=item.find('div', class_=self.temperature).get_text(),\r\n temp1=item.find('div', 
class_=self.pClassOne).get_text(),\r\n temp2=item.find('div', class_=self.pClassTwo).get_text(),\r\n ))\r\n itemsNextDay = soup.find_all(attrs={'data-daynumber': '1'})\r\n weather2 = []\r\n\r\n for item in itemsNextDay:\r\n weather2.append(dict(\r\n title=item.find('div', class_=self.tabsContentInner).get_text(),\r\n date1=item.find('div', class_=self.temperature).get_text(),\r\n temp=item.find('div', class_=self.pClassOne).get_text(),\r\n temp2=item.find('div', class_=self.pClassTwo).get_text(),\r\n ))\r\n return [weather1, weather2]\r\n\r\nclass ContentSinoptik(Weather_father, Link):\r\n def __init__(self, html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max):\r\n super(ContentSinoptik, self).__init__(html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max)\r\n\r\n def outputInf(self, weatherList):\r\n dicSet = super().enter_the_inf()\r\n weatherFather = weatherList[0]\r\n weatherFather2 = weatherList[1]\r\n weatherFather3 = weatherList[2]\r\n weatherNextDay = weatherList[3]\r\n\r\n for i in weatherFather:\r\n w1 = i\r\n for i in weatherFather2:\r\n w2 = i\r\n for i in weatherFather3:\r\n w3 = i\r\n for i in weatherNextDay:\r\n w4 = i\r\n print(\"------------------------------------------------------\")\r\n print(\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tПогода з сайту Sinoptik \" + dicSet)\r\n print(\"День:\" + w1['title'])\r\n print(\"Число: \" + w1['date1'], ' ', w1['date2'])\r\n print(\"Температура: :\" + w1['temp'], \" || \" + w1['temp2'])\r\n print()\r\n\r\n mw2 = []\r\n for i in w2:\r\n mw2.append(w2[i])\r\n mw3 = []\r\n for i in w3:\r\n mw3.append(w3[i])\r\n print(\"Прогноз на день\")\r\n for i in mw2:\r\n print(i, end=' ')\r\n print()\r\n for j in mw3:\r\n print(j, end=' ')\r\n if j == mw3[4]:\r\n print(end=' ')\r\n if j == mw3[5]:\r\n print(end=' ')\r\n print()\r\n\r\n # ТЕМПЕРАТУРА НА СЕГОДНЯ\r\n result1 = w1['temp'] + w1['temp2']\r\n currentTemp = []\r\n num = 
\"\"\r\n for char in result1:\r\n if char.isdigit():\r\n num = num + char\r\n else:\r\n if num != '':\r\n currentTemp.append(int(num))\r\n num = ''\r\n if num != '':\r\n currentTemp.append(int(num))\r\n # ---------------------\r\n\r\n # ТЕМПЕРАТУРА НА ЗАВТРА\r\n result1 = w4['temp'] + w4['temp2']\r\n weatherFuture = []\r\n num2 = \"\"\r\n for char in result1:\r\n if char.isdigit():\r\n num2 = num2 + char\r\n else:\r\n if num2 != '':\r\n weatherFuture.append(int(num2))\r\n num2 = ''\r\n if num2 != '':\r\n weatherFuture.append(int(num2))\r\n # ---------------------\r\n\r\n # ЗАПИСИСЬ ТЕКУЩЕЙ ПОГОДЫ В ФАЙЛ\r\n try:\r\n files = open(\"w1.txt\", \"w\")\r\n files.write(str(currentTemp))\r\n files.close()\r\n except:\r\n print()\r\n # ---------------------\r\n\r\n # ЗАПИСЬ БУДУЮЩЕЙ ПОГОДЫ В ФАЙЛ ДЛЯ ПОДАЛЬШЕГО СРАВНЕНИЯ\r\n try:\r\n files = open(\"w2.txt\", \"a\")\r\n files.write(str(weatherFuture))\r\n files.close()\r\n except:\r\n print()\r\n # ---------------------\r\n # ЗАПИСЬ ЗАВТРАШНЕЙ И СЕГОДНЯШНЕЙ ТЕМПЕРАТУРЫ В ПЕРЕМЕННЫЕ\r\n files = open(\"w2.txt\", \"r\")\r\n openWeatherPast = files.read()\r\n files.close()\r\n files = open(\"w1.txt\", \"r\")\r\n openWeatherCurrent = files.read()\r\n files.close()\r\n\r\n # ПРЕОБРАЗОВАНИЕ В МАСИВ\r\n pastW = list(map(int, openWeatherPast[1:-1].split(',')))\r\n currentW = list(map(int, openWeatherCurrent[1:-1].split(',')))\r\n # ---------------------\r\n\r\n minTemp = currentW[0] - pastW[0]\r\n maxTemp = currentW[1] - pastW[1]\r\n print(\"Відхилення погоди на sinoptik.ua може становити від {} до {} градусів\".format(abs(maxTemp), abs(minTemp)))\r\n print(\"------------------------------------------------------\")\r\n\r\n# https://www.meteoprog.ua/ru/weather/London/\r\nclass ContentMeteoprog(ContentSinoptik):\r\n def __init__(self, html, mainClass, mainNextDay, tabsContentInner, temperature, pClassOne, pClassTwo, pClassThree, min, max):\r\n super(ContentMeteoprog, self).__init__(html, mainClass, mainNextDay, tabsContentInner, 
temperature, pClassOne, pClassTwo, pClassThree, min, max)\r\n\r\n def outputInf(self, weather):\r\n dicSet = super().enter_the_inf()\r\n weatherToday = weather[0]\r\n weatherTomorrow = weather[1]\r\n for i in weatherToday:\r\n w1 = i\r\n for i in weatherTomorrow:\r\n w2 = i\r\n print(\"------------------------------------------------------\")\r\n print(\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\tПогода з сайту MeteoProg \" + dicSet)\r\n print(\"День:\" + w1['title'].strip())\r\n print(\"Число: \" + w1['date1'].strip())\r\n print(\"Температура: \" + w1['temp1'] + \" || \" + w1['temp2'])\r\n # ТЕМПЕРАТУРА НА СЕГОДНЯ\r\n result1 = w1['temp1'] + w1['temp2']\r\n currentTemp = []\r\n num1 = \"\"\r\n for char in result1:\r\n if char.isdigit():\r\n num1 = num1 + char\r\n else:\r\n if num1 != '':\r\n currentTemp.append(int(num1))\r\n num1 = ''\r\n if num1 != '':\r\n currentTemp.append(int(num1))\r\n # ---------------------\r\n\r\n # ТЕМПЕРАТУРА НА ЗАВТРА\r\n result2 = w2['temp'] + w2['temp2']\r\n weatherFuture = []\r\n num2 = \"\"\r\n for char in result2:\r\n if char.isdigit():\r\n num2 = num2 + char\r\n else:\r\n if num2 != '':\r\n weatherFuture.append(int(num2))\r\n num2 = ''\r\n if num2 != '':\r\n weatherFuture.append(int(num2))\r\n\r\n # ---------------------\r\n\r\n # ЗАПИСИСЬ ТЕКУЩЕЙ ПОГОДЫ В ФАЙЛ\r\n try:\r\n files = open(\"w1M.txt\", \"w\")\r\n files.write(str(currentTemp))\r\n files.close()\r\n except:\r\n print()\r\n # ---------------------\r\n\r\n # ЗАПИСЬ БУДУЮЩЕЙ ПОГОДЫ В ФАЙЛ ДЛЯ ПОДАЛЬШЕГО СРАВНЕНИЯ\r\n try:\r\n files = open(\"w2M.txt\", \"a\")\r\n files.write(str(weatherFuture))\r\n files.close()\r\n except:\r\n print()\r\n # ---------------------\r\n\r\n # ЗАПИСЬ ЗАВТРАШНЕЙ И СЕГОДНЯШНЕЙ ТЕМПЕРАТУРЫ В ПЕРЕМЕННЫЕ\r\n files = open(\"w2M.txt\", \"r\")\r\n openWeatherPast = files.read()\r\n files.close()\r\n files = open(\"w1M.txt\", \"r\")\r\n openWeatherCurrent = files.read()\r\n files.close()\r\n # print(\"Температура на завтра\")\r\n # print(openWeatherPast)\r\n # 
---------------------\r\n\r\n # ПРЕОБРАЗОВАНИЕ В МАСИВ\r\n pastW = list(map(int, openWeatherPast[1:-1].split(',')))\r\n currentW = list(map(int, openWeatherCurrent[1:-1].split(',')))\r\n # ---------------------\r\n minTemp = pastW[0] - currentW[0]\r\n maxTemp = pastW[1] - currentW[1]\r\n print(\"Відхилення погоди на meteoprog.ua може становити від {} до {} градусів\".format(abs(minTemp), abs(maxTemp)))\r\n print(\"------------------------------------------------------\")\r\n\r\n# Вивід информації х sinoptik\r\nsinoptik= Link()\r\nsinoptikLink = sinoptik.poiskpersSin(\"погода-\"+sinoptik.enter_the_inf())\r\nsinoptikRequest = Request(sinoptikLink)\r\nsinoptikResponse = sinoptikRequest.get_html()\r\nsinoptikOutputInf = ContentSinoptik(sinoptikResponse, 'main loaded', \"bd2\", 'tabsContentInner', 'temperature', 'day-link', 'date', 'month', 'min', 'max')\r\nsinoptikOutputInf.outputInf(sinoptikOutputInf.start_gettingSinoptik())\r\n\r\n# Вивід информації з meteoprog\r\nmeteorg = Link()\r\nmeteorgLink = meteorg.poiskpersMet(\"weather/\"+meteorg.enter_the_inf()+\"/\")\r\nmeteorgRequest = Request(meteorgLink)\r\nmeteorgResponse = meteorgRequest.get_html()\r\nmeteorgOutputInf = ContentMeteoprog(meteorgResponse, \"activeBg\", \"someDayOffWeek\", 'dayoffWeek', 'dayoffMonth', 'from', 'to', \"asd\", \"asds\", \"asdfg\")\r\nmeteorgOutputInf.outputInf(meteorgOutputInf.start_gettingMeteoprog())\r\n\r\n\r\n\r\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":13904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"513823432","text":"class concreteProperties:\r\n def __init__(self,fck,ecu3,ec3,units='MPa'):\r\n allowableUnits = ['MPa']\r\n if units not in allowableUnits:\r\n raise Exception(\"Please use MPa as material units for now\")\r\n self.Ec = (fck*0.85/1.5)/ec3\r\n self.fck = fck\r\n self.fcd = fck*0.85/1.5\r\n self.ecu3 = ecu3\r\n self.ecu2 = ecu3/2\r\n self.ec3 = ec3#-self.fcd/Ec #ec3 in eurocode\r\n\r\nclass steelProperties:\r\n def __init__(self,Es,fyk,euk,k,units='MPa'):\r\n allowableUnits = ['MPa']\r\n if units not in allowableUnits:\r\n raise Exception(\"Please use MPa as material units for now\")\r\n self.Es = Es\r\n self.fyk = fyk\r\n self.fyd = fyk/1.15\r\n self.euk = euk\r\n self.eud = 0.9*euk\r\n self.k = k\r\n self.ey = self.fyd/Es","sub_path":"sectionAnalysis/reinforcedConcrete/materials.py","file_name":"materials.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"88879375","text":"def fatorial (y):\n r = 1\n i = 1\n while i < y:\n i += 1 \n r *= i\n return r\n\ndef calcula_euler (x,n):\n i = 0\n a = 0\n while i < n:\n a += (x**i)/fatorial(n-1)\n i += 1\n return a\n ","sub_path":"backup/user_173/ch119_2020_04_01_15_11_34_816639.py","file_name":"ch119_2020_04_01_15_11_34_816639.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"360675306","text":"import os\nimport fnmatch\nimport warnings\nimport numpy as np\nfrom joblib import Parallel, delayed, parallel_backend\nfrom FRETboard.MainTable import MainTable\nfrom pathlib import Path\n\ndef parse_input_path(location, pattern=None):\n \"\"\"\n Take path, list of files or single file, Return list of files with path name concatenated.\n \"\"\"\n if not isinstance(location, list):\n location = [location]\n all_files = []\n for loc in location:\n loc = Path(loc).resolve()\n if loc.is_dir():\n for root, dirs, files in os.walk(loc):\n if pattern:\n for f in fnmatch.filter(files, pattern):\n all_files.append(os.path.join(root, f))\n else:\n for f in files:\n all_files.append(os.path.join(root, f))\n elif loc.exists():\n all_files.extend(str(loc))\n else:\n warnings.warn('Given file/dir %s does not exist, skipping' % str(loc), RuntimeWarning)\n if not len(all_files):\n ValueError('Input file location(s) did not exist or did not contain any files.')\n return all_files\n\n\ndef parallel_fn(f_array, fn_list, dt):\n for fi, f in enumerate(np.hsplit(f_array, f_array.shape[1])):\n f = f.squeeze()\n dt.add_tuple(np.row_stack((np.arange(f.shape[1]), f)), fn_list[fi])\n return dt.data\n\n\ndef parse_trace_file(file_contents, fn, threads, eps):\n \"\"\"\n Take contents extracted from .trace binary file, return list of [threads] MainTable objects\n \"\"\"\n nb_colors = 2\n nb_frames, _, nb_traces = np.frombuffer(file_contents, dtype=np.int16, count=3)\n traces_vec = np.frombuffer(file_contents, dtype=np.int16)\n traces_vec = traces_vec[3:]\n nb_points_expected = nb_colors * (nb_traces // nb_colors) * nb_frames\n traces_vec = traces_vec[:nb_points_expected]\n file_contents = traces_vec.reshape((nb_colors, nb_traces // nb_colors, nb_frames), order='F')\n fn_clean = os.path.splitext(fn)[0]\n\n file_chunks = np.array_split(file_contents, threads, axis=1)\n fn_list = [f'{fn_clean}_{it}.dat' for it in range(file_contents.shape[1])]\n fn_chunks = 
np.array_split(fn_list, threads)\n\n df_list = Parallel(n_jobs=threads)(delayed(parallel_fn)(fc, fnc, MainTable([], eps))\n for fc, fnc in zip(file_chunks, fn_chunks))\n return df_list\n","sub_path":"build/lib/FRETboard/io_functions.py","file_name":"io_functions.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"223958615","text":"import time\nimport klassen\nimport spelspelen\nimport spelstarten\ninstelling_twee = spelstarten.instelling_twee\ninstelling_zeven = spelstarten.instelling_zeven\ninstelling_acht = spelstarten.instelling_acht\ninstelling_tien = spelstarten.instelling_tien\ninstelling_boer = spelstarten.instelling_boer\ninstelling_heer = spelstarten.instelling_heer\ninstelling_joker = spelstarten.instelling_joker\ninstelling_aas = spelstarten.instelling_aas\n\n#functie voor het tellen van kaarten op symbool\ndef kaart_teller(speler_hand):\n L=[klassen.hand(), klassen.hand(), klassen.hand(), klassen.hand(), klassen.hand(), klassen.hand()]\n i = len(speler_hand)-1\n #check alle kaarten in hand, sorteer op symbool\n while i >= 0:\n if speler_hand[i].symbool == 'schoppen':\n L[0].append(speler_hand[i])\n i=i-1\n elif speler_hand[i].symbool == 'ruiten':\n L[1].append(speler_hand[i])\n i=i-1\n elif speler_hand[i].symbool == 'harten':\n L[2].append(speler_hand[i])\n i=i-1\n elif speler_hand[i].symbool == 'klaveren':\n L[3].append(speler_hand[i])\n i=i-1\n elif speler_hand[i].waarde == 'J':\n L[4].append(speler_hand[i])\n i=i-1\n # kijk of er 2'en aanwezig zijn.\n if speler_hand[i].waarde == '2':\n L[5].append(speler_hand[i])\n #return telling van sortering\n return ([len(L[0]),len(L[1]), len(L[2]), len(L[3]), len(L[4]), len(L[5])])\n\n#functie voor het uitvoeren van de pestkaart twee\ndef kaart_twee(gespeeld,deck,handen,volgorde,beurt):\n #check instelling\n if instelling_twee == 'ja':\n #tel 2'en in hand volgende speler\n A = kaart_teller(handen[volgorde[(beurt+1)%len(volgorde)]])\n print(volgorde[beurt], 'heeft een twee gespeeld!')\n time.sleep(3)\n if A[-1] == 0:\n #Als geen 2'en in hand, pak twee kaarten\n print(volgorde[(beurt+1)%len(volgorde)], 'moet twee kaarten pakken')\n time.sleep(3)\n spelspelen.kaart_pakken(handen[volgorde[(beurt+1)%len(volgorde)]],deck)\n spelspelen.kaart_pakken(handen[volgorde[(beurt+1)%len(volgorde)]],deck)\n beurt = beurt + 
1\n else:\n #Als wel 2'en in hand\n speler_verschil = 1\n aantal_twee = A[-1]\n while aantal_twee != 0:\n #Check handen volgende spelers totdat er een speler is zonder 2'en.\n aantal_twee = kaart_teller(handen[volgorde[(beurt+speler_verschil)%len(volgorde)]])[-1]\n if aantal_twee != 0:\n print(volgorde[(beurt+speler_verschil)%len(volgorde)], 'heeft ook een twee!')\n time.sleep(3)\n index = len(handen[volgorde[(beurt+speler_verschil)%len(volgorde)]])-1\n #kijk waar twee in hand zit\n while handen[volgorde[(beurt+speler_verschil)%len(volgorde)]][index].waarde != '2':\n index = index-1\n gespeeld.append(handen[volgorde[(beurt+speler_verschil)%len(volgorde)]].pop(index))\n speler_verschil = speler_verschil+1\n #stel vast het aantal te pakken kaarten \n aantal = len(gespeeld)*2 \n print(volgorde[(beurt+speler_verschil-1)%len(volgorde)], 'moet',aantal,'kaarten pakken')\n time.sleep(3)\n k = 0\n #pak alle kaarten\n while k != aantal:\n spelspelen.kaart_pakken(handen[volgorde[(beurt+speler_verschil-1)%len(volgorde)]],deck)\n k = k+1 \n beurt = beurt + speler_verschil - 1\n return ([gespeeld,deck,handen,beurt]) \n\n#functie voor de pestkaart zeven \ndef kaart_zeven(beurt):\n #checkt instelling\n if instelling_zeven == 'ja':\n #beurt 1 plaats terug (wordt later met 1 verhoogt)\n beurt = beurt - 1\n return beurt\n\n#functie voor de pestkaart acht\ndef kaart_acht(beurt):\n #checkt instelling\n if instelling_acht == 'ja':\n #voegt 1 toe bij de beurt zodat de volgende speler wordt overgeslagen\n beurt = beurt + 1\n return beurt\n\n#functie voor de pestkaart tien \ndef kaart_tien(handen,volgorde,beurt):\n #checkt instelling\n if instelling_tien == 'ja':\n a=len(handen[volgorde[beurt]])\n #voeg elke kaart uit de hand van de volgende speler toe aan de hand van de beurtspeler\n for i in range(len(handen[volgorde[(beurt+1)%len(volgorde)]])):\n handen[volgorde[beurt]].append(handen[volgorde[(beurt+1)%len(volgorde)]][i])\n #maak de hand van de volgende speler leeg\n 
handen[volgorde[(beurt+1)%len(volgorde)]].clear()\n #voeg de 'oude' kaarten uit de hand van de beurtspeler toe aan de hand van de volgende speler\n for i in range(a):\n handen[volgorde[(beurt+1)%len(volgorde)]].append(handen[volgorde[beurt]][i])\n #verwijder de 'oude' kaarten uit de hand van de beurtspeler\n for i in range(a):\n handen[volgorde[beurt]].remove(handen[volgorde[beurt]][0])\n #laat de menselijke speler weten wat er precies gebeurt\n print(volgorde[beurt],'heeft handen gewisseld met',volgorde[(beurt+1)%len(volgorde)],'!')\n time.sleep(3)\n return handen\n\n#functie voor de pestkaart boer \ndef kaart_boer(gespeeld,volgorde,beurt,handen):\n #checkt instelling\n if instelling_boer == 'ja':\n #als de speler aan de beurt is: vraag om het nieuwe symbool\n if volgorde[beurt] == 'speler': \n print('Welk symbool wilt u spelen?')\n symbool_input = input('Uw keuze is: ')\n #zolang input niet voldoet aan 1 cvan de symbolen, vraag opnieuw\n while symbool_input != 'schoppen' and symbool_input != 'harten' and symbool_input != 'klaveren' and symbool_input != 'ruiten':\n print('U kunt kiezen uit: schoppen, klaveren, ruiten of harten')\n symbool_input = input('Uw keuze is: ')\n #voeg vervolgens de gekozen boer toe aan de pot\n gespeeld.append(klassen.kaart(str(symbool_input),'B'))\n else:\n #tegenstander checkt van welk symbool hij de meeste heeft\n aantal_symbolen = kaart_teller(handen[volgorde[beurt]])\n #verwijdert de telling van de joker en 2'en\n aantal_symbolen.remove(aantal_symbolen[-1])\n aantal_symbolen.remove(aantal_symbolen[-1])\n #selecteer het symbool waarvan de meeste zijn\n max_symbool = max(aantal_symbolen)\n i = 0\n #zoekt door gebruik van de index weer naar de naam van het symbool waarvan de meeste zijn.\n while aantal_symbolen[i] != max_symbool:\n i = i + 1\n symbolen = ['schoppen','ruiten','harten','klaveren']\n print(volgorde[beurt], 'heeft de pot veranderd in', symbolen[i],'!')\n time.sleep(3)\n #voegt vervolgens de gekozen boer toe aan de pot voor 
de goede weergave van de zet\n gespeeld.append(klassen.kaart(symbolen[i],'B'))\n #geeft gespeeld terug met daarin de boer\n return gespeeld\n\n#functie voor de pestkaart heer\ndef kaart_heer(beurt):\n #checkt instelling\n if instelling_heer == 'ja':\n #beurt 1 plaats terug (wordt later met 1 verhoogd)\n beurt = beurt - 1\n return beurt\n\n#functie voor de pestkaart aas\ndef kaart_aas(beurt,volgorde):\n #checkt instelling\n if instelling_aas == 'ja':\n #maak nieuwe volgorde aan en bepaal index\n nieuwevolgorde=[]\n i = beurt\n #Ga alle spelers in volgorde langs en voeg zij toe aan nieuwe volgorde door steeds de persoon voor beurt te kiezen.\n while i >= 0:\n nieuwevolgorde.append(volgorde[i])\n i = i - 1\n for i in range(1,len(volgorde)-beurt):\n nieuwevolgorde.append(volgorde[-i])\n #stel beurt weer in om bij het begin van volgorde te beginnen\n beurt = 0\n print(nieuwevolgorde[beurt],'heeft de volgorde omgedraaid!')\n time.sleep(3)\n #return de beurt en de nieuwe volgorde\n return ([beurt,nieuwevolgorde])\n #als instelling nee, return meteen beurt en volgorde\n else:\n return ([beurt,volgorde])\n\n#functie voor de pestkaart joker\ndef kaart_joker(handen,deck,volgorde,beurt):\n #check instelling \n if instelling_joker == 'ja':\n # voer de functie kaartpakken 5 keer uit voor de volgende speler\n for i in range(5):\n spelspelen.kaart_pakken(handen[volgorde[(beurt+1)%len(volgorde)]],deck)\n # return alle handen en het deck\n return ([handen,deck])\n","sub_path":"Pesten/pestkaarten.py","file_name":"pestkaarten.py","file_ext":"py","file_size_in_byte":7926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"62052758","text":"from collections import defaultdict\n\nd = defaultdict(int)\nwith open('q1.txt', mode='r') as f:\n for l in f.readlines():\n for c in l:\n if 97 <= ord(c) <= 122:\n d[c] += 1\n elif 65 <= ord(c) <= 90:\n d[c.lower()] += 1\nfor c, h in sorted(d.items(), key=lambda o: o[0]):\n print(c, h)\n","sub_path":"2007_summer/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"296639489","text":"import os\nimport tempfile\nfrom io import StringIO\nfrom logging.config import fileConfig\nfrom unittest import TestCase\n\nimport pandas as pd\n\nfrom algorithms.RelationExtractionLinearFactory import RelationExtractionLinearFactory\nfrom algorithms.result_scorer import ResultScorer\n\n\nclass TestSitRelationExtractionLinearFactory(TestCase):\n def setUp(self):\n fileConfig(os.path.join(os.path.dirname(__file__), 'logger.ini'))\n\n def test_call(self):\n # Arrange\n out_dir= tempfile.mkdtemp()\n embedding = StringIO(\"\\n\".join([\"hat 0.2 .34 0.8\", \"mat 0.5 .34 0.8\", \"entity1 0.5 .55 0.8\", \"entity2 0.3 .55 0.9\"]))\n sut = RelationExtractionLinearFactory(class_size=2, embedding_handle=embedding, embedding_dim=3, ngram=1,\n output_dir=out_dir, pos_label=\"1\")\n\n train_df = [[\"This is good\", \"entity1\", \"entity2\"],\n [\"this is a cat not a hat\", \"mat protein\", \"cat protein\"]]\n\n val_data = [[\"This is hat\", \"entity1\", \"entity2\"],\n [\"this is a cat not a mat\", \"mat protein\", \"cat protein\"]]\n\n labels = [\"1\", \"0\"]\n cols =['abstract', 'entity1', 'entity2']\n train_df = pd.DataFrame(train_df, columns=cols)\n val_df = pd.DataFrame(val_data,columns=cols)\n\n # Act\n actual = sut(train_df, labels, val_df, labels)\n\n def test_predict(self):\n # Arrange\n out_dir = tempfile.mkdtemp()\n embedding = StringIO(\n \"\\n\".join([\"hat 0.2 .34 0.8\", \"mat 0.5 .34 0.8\", \"entity1 0.5 .55 0.8\", \"entity2 0.3 .55 0.9\"]))\n pos_label = 1\n sut = RelationExtractionLinearFactory(class_size=2, embedding_handle=embedding, embedding_dim=3, ngram=1,\n output_dir=out_dir, pos_label=pos_label)\n\n train_df = [[\"This is good\", \"entity1\", \"entity2\"],\n [\"this is a cat not a hat\", \"mat protein\", \"cat protein\"]]\n\n val_data = [[\"This is hat\", \"entity1\", \"entity2\"],\n [\"this is a cat not a mat\", \"mat protein\", \"cat protein\"]]\n\n labels = [1, 0]\n cols = ['abstract', 'entity1', 'entity2']\n train_df = 
pd.DataFrame(train_df, columns=cols)\n val_df = pd.DataFrame(val_data, columns=cols)\n\n model, expected_scores, expected_actual, expected_predicted = sut(train_df, labels, val_df, labels)\n\n predictor = RelationExtractionLinearFactory.load(out_dir)\n\n scorer = ResultScorer()\n\n # Act\n actual = predictor(val_df)\n\n self.assertSequenceEqual(expected_scores, scorer(y_pred=actual, y_actual=labels, pos_label=1))\n","sub_path":"tests/test_algorithms/test_sit_relationExtractionLinearFactory.py","file_name":"test_sit_relationExtractionLinearFactory.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"301922518","text":"from flask_restplus import Namespace, Resource, reqparse\nfrom dateutil import parser as datetime_parser\nfrom models import *\n\n\napi = Namespace('Loan History', description='Loans related operations')\n\nparser = reqparse.RequestParser()\nparser.add_argument('book_id', help='The identifier of the book loaned out')\nparser.add_argument('borrower_id', help='The user_id of the borrower of the book')\nparser.add_argument('due', help='The due date of the book')\nparser.add_argument('actual_return_date', help='The actual return date of the book')\n\npost_parser = parser.copy()\npost_parser.remove_argument('actual_return_date')\npost_parser.replace_argument('book_id', help='The identifier of the book loaned out', required=True)\npost_parser.replace_argument('borrower_id', help='The user_id of the borrower of the book', required=True)\n\n\n@api.route('/')\nclass Loans(Resource):\n @api.doc('create_loan')\n @api.doc(responses={\n 201: 'Created',\n 400: 'Validation Error',\n 404: 'Book or User Not Found',\n })\n @api.expect(post_parser)\n def post(self):\n '''create a loan'''\n args = post_parser.parse_args()\n book_id = args['book_id']\n book = Book.query.get_or_404(book_id)\n if book.LoanedOut:\n return \"The book is already loaned out\", 400\n new_loan_history = LoanHistory(BookId=args['book_id'],\n BorrowerId=args['borrower_id'])\n due = args['due']\n if due is not None:\n new_loan_history.Due = datetime_parser.parse(due)\n db.session.add(new_loan_history)\n book.LoanedOut = True\n db.session.flush()\n db.session.commit()\n return new_loan_history.serialize(), 201\n\n @api.doc('get_loan')\n @api.doc(responses={\n 200: 'Success',\n 400: 'Validation Error'\n })\n @api.expect(parser)\n def get(self): \n '''get all loans given constraints'''\n args = parser.parse_args()\n book_id = args['book_id']\n borrower_id = args['borrower_id']\n due = args['due']\n actual_return_date = args['actual_return_date']\n\n queries = []\n if book_id is not 
None:\n queries.append(LoanHistory.BookId == book_id)\n if borrower_id is not None:\n queries.append(LoanHistory.BorrowerId == borrower_id)\n if due is not None:\n due = datetime_parser.parse(due)\n queries.append(LoanHistory.Due == due)\n if actual_return_date is not None:\n actual_return_date = datetime_parser.parse(actual_return_date)\n queries.append(LoanHistory.ActualReturnDate == actual_return_date)\n\n loan_list = db.session.query(LoanHistory).filter(*queries).order_by(LoanHistory.LoanId).all()\n return Serializer.serialize_list(loan_list), 200\n\n\n@api.route('/')\n@api.param('loan_id', 'The loan identifier')\n@api.response(404, 'Loan Not Found')\nclass LoanOfID(Resource):\n @api.doc(responses={\n 200: 'Success',\n })\n @api.doc('get_loan')\n def get(self, loan_id):\n '''Fetch a loan given its identifier'''\n loan = LoanHistory.query.get_or_404(loan_id)\n return loan.serialize(), 200\n\n @api.doc(responses={\n 200: 'Success',\n })\n @api.doc(params={'due': 'The due date of the book'})\n @api.doc(params={'actual_return_date': 'The actual return date of the book'})\n @api.expect(parser)\n def put(self, loan_id):\n '''Update the content of a loan given its identifier'''\n loan = LoanHistory.query.get_or_404(loan_id)\n args = parser.parse_args()\n due = args['due']\n actual_return_date = args['actual_return_date']\n if due is not None:\n loan.Due = datetime_parser.parse(due)\n if actual_return_date is not None:\n # TODO: check if return date is < current date\n loan.ActualReturnDate = datetime_parser.parse(actual_return_date)\n book = Book.query.get_or_404(loan.BookId)\n if book.LoanedOut:\n book.LoanedOut = False\n db.session.commit()\n return loan.serialize(), 200\n\n @api.doc(responses={\n 204: 'Deleted',\n })\n def delete(self, loan_id):\n '''Delete a note given its identifier'''\n loan = LoanHistory.query.get_or_404(loan_id)\n book = Book.query.get_or_404(loan.BookId)\n if book.LoanedOut:\n book.LoanedOut = False\n 
LoanHistory.query.filter_by(LoanId=loan_id).delete()\n db.session.commit()\n return 'Success', 204\n","sub_path":"apis/loan.py","file_name":"loan.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"106654362","text":"from .logs import Registry\n\n\nlog = Registry()\n\n\nrename = log.rename\nupdate = log.update\n\n\ndebug = log.debug\ninfo = log.info\nwarn = log.warn\nwarning = log.warning\nerror = log.error\nexception = log.error\ncritical = log.critical\n\n# from .enums import Levels\n# from .formatters import GenericFormatter\n# from .handlers import FileHandler\n#\n#\n#\n#\n#\n#\n# DefaultConfig = {\n# \"disabled\": True,\n# \"formatters\": {\n# \"default\": {\n# \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n# \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\"\n# }\n# },\n# \"handlers\": {\n# \"console\": {\n# \"class\": \"logging.StreamHandler\",\n# \"level\": \"DEBUG\",\n# \"formatter\": \"default\",\n# \"stream_\": \"ext://sys.stdout\"\n# },\n# \"logfile\": {\n# \"class\": \"logging.handlers.RotatingFileHandler\",\n# \"level\": \"DEBUG\",\n# \"formatter\": \"default\",\n# \"maxBytes\": 25000,\n# \"backupCount\": 2,\n# \"filename\": \"/logs/application.log\"\n# }\n# },\n# \"loggers\": {\n# \"%NAME%\": {\n# \"level\": \"DEBUG\",\n# \"handlers\": [\"console\", \"logfile\"],\n# \"propagate\": False\n# }\n# }\n# }","sub_path":"fuze/log/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"192861933","text":"import json\nimport os\nfrom datetime import datetime, timezone\nfrom json.decoder import JSONDecodeError\nfrom os import path, remove\nfrom shutil import rmtree, move\nfrom tempfile import mkdtemp\n\nfrom retriever import reload_scripts, dataset_names\nfrom retriever import download\nfrom retriever.lib.engine_tools import getmd5\nfrom retriever.lib.defaults import HOME_DIR\n\nimport sys\nimport csv\n\ntry:\n from .status_dashboard_tools import get_dataset_md5\n from .status_dashboard_tools import diff_generator, diff_generator_spatial, data_shift\n from .status_dashboard_tools import create_dirs\n from .status_dashboard_tools import dataset_type, join_path, delete_raw_data\n from .status_dashboard_tools import install_postgres\nexcept ImportError as error:\n try:\n from retrieverdash.dashboard_script.status_dashboard_tools import get_dataset_md5\n from retrieverdash.dashboard_script.status_dashboard_tools import diff_generator, diff_generator_spatial, data_shift\n from retrieverdash.dashboard_script.status_dashboard_tools import create_dirs\n from retrieverdash.dashboard_script.status_dashboard_tools import dataset_type, join_path, delete_raw_data\n from retrieverdash.dashboard_script.status_dashboard_tools import install_postgres\n except ImportError as error:\n pass\n# To set location of the path\nfile_location = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))\n\n# To prevent field size errors when converting to csv\nmaxInt = sys.maxsize\ndecrement = True\nwhile decrement:\n try:\n csv.field_size_limit(maxInt)\n decrement = False\n except OverflowError:\n maxInt = int(maxInt / 10)\n\n# The DEV_LIST, useful for testing on less strong machines.\nDEV_LIST = ['iris', 'poker-hands', 'harvard-forest', 'titanic']\nIGNORE = [\n 'activity-timberharvest',\n 'covid-case-surveillance',\n 'aquatic-animal-excretion',\n 'lakecats-final-tables',\n # Amazon\n 'baltimore-restaurants',\n 'fernow-soil-productivity',\n 
'nlcd-imperviousness-conus',\n 'white-clay-creek-avondale-soil',\n 'white-clay-creek-boulton-chemistry',\n 'white-clay-creek-chlorophyll',\n 'white-clay-creek-christina-chemistry',\n 'white-clay-creek-christina-sediment',\n 'white-clay-creek-christina-temperatures',\n 'white-clay-creek-streamflow',\n 'white-clay-creek-swrc-meteorology',\n 'white-clay-creek-waterlevels',\n 'white-clay-dissolved-carbon',\n 'white-clay-dissolved-carbon',\n 'usda-agriculture-plants-database',\n 'vertnet',\n 'vertnet-amphibian',\n 'vertnet-bird',\n 'vertnet-fishe',\n 'vertnet-mammal',\n 'vertnet-reptiles'\n]\n\nDATASET_DETAIL_JSON = os.path.join(file_location, \"dataset_details.json\")\nCURRENT_PATH = os.path.join(file_location, 'current')\nDATASET_DATA_FOLDER = os.path.join(file_location, 'current', '{dataset_name}')\n\n\ndef check_dataset(dataset):\n md5 = None\n status = None\n reason = None\n diff = None\n dataset_detail = None\n previous_md5 = \"\"\n\n try:\n dataset_detail = load_dataset_details()\n previous_detail_records = \"dataset_details\" in dataset_detail and dataset_detail[\n \"dataset_details\"]\n dataset_has_record = dataset.name in dataset_detail['dataset_details']\n if previous_detail_records and dataset_has_record:\n previous_md5 = dataset_detail['dataset_details'][dataset.name]['md5']\n\n if dataset_type(dataset) == 'spatial':\n install_postgres(dataset)\n dir_path = DATASET_DATA_FOLDER.format(dataset_name=dataset.name)\n md5 = getmd5(dir_path, data_type='dir')\n if not dataset_has_record or md5 != previous_md5:\n diff = diff_generator_spatial(dataset)\n else:\n remove_old_diff(dataset)\n data_shift(dataset, is_spatial=True)\n else:\n md5 = get_dataset_md5(dataset)\n if not dataset_has_record or md5 != previous_md5:\n diff = diff_generator(dataset)\n else:\n remove_old_diff(dataset)\n data_shift(dataset)\n status = True\n except Exception as e:\n reason = str(e)\n status = False\n finally:\n json_file_details = dataset_detail\n 
json_file_details[\"dataset_details\"][dataset.name] = {\n \"md5\": md5,\n \"status\": status,\n \"reason\": reason,\n \"diff\": diff\n }\n json_file_details[\"last_checked_on\"] = datetime.now(\n timezone.utc).strftime(\"%d %b %Y\")\n dataset_details_write = open(DATASET_DETAIL_JSON, 'w')\n json.dump(json_file_details, dataset_details_write, sort_keys=True, indent=4)\n dataset_details_write.close()\n delete_raw_data(dataset)\n\n\ndef remove_old_diff(dataset):\n \"\"\"Delete old html diffs from previous run\"\"\"\n for keys in dataset.tables:\n file_name = '{}.{}'.format(dataset.name.replace('-', '_'), keys)\n html_file_name = '{}.html'.format(file_name)\n old_diff = os.path.join(file_location, 'diffs', html_file_name)\n if os.path.exists(old_diff):\n remove(old_diff)\n\n\ndef load_dataset_details():\n \"\"\"Read dataset details from last run \"\"\"\n try:\n with open(DATASET_DETAIL_JSON, 'r') as json_file:\n dataset_detail = json.load(json_file)\n except (OSError, JSONDecodeError):\n dataset_detail = dict()\n dataset_detail['dataset_details'] = {}\n\n for dataset_ignore in IGNORE:\n if dataset_detail['dataset_details'] and dataset_ignore in dataset_detail[\n 'dataset_details']:\n dataset_detail['dataset_details'].pop(dataset_ignore)\n return dataset_detail\n\n\ndef run():\n create_dirs()\n datasets_to_check = []\n\n if os.environ.get(\"RETR_TEST\") == \"true\":\n datasets_to_check = [\n script for script in reload_scripts() if script.name in DEV_LIST\n ]\n else:\n datasets_to_check = [\n script for script in reload_scripts() if script.name not in IGNORE\n ]\n\n for dataset in datasets_to_check:\n print(\"Checking dataset {}:\".format(dataset.name))\n check_dataset(dataset)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"retrieverdash/dashboard_script/dashboard_script.py","file_name":"dashboard_script.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"491858525","text":"# Regression Template\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\n\n# Importing the dataset\nxt = pd.read_csv('dengue_features_train.csv')\nyt = pd.read_csv('dengue_labels_train.csv')\nwt = pd.read_csv('dengue_features_test.csv')\n\nX = xt.iloc[:,4:].values\ny = yt.iloc[:, 3].values\nw = wt.iloc[:,4:].values\n\nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\nimputer = imputer.fit(X)\nX = imputer.transform(X)\n\nimputer = imputer.fit(w)\nw = imputer.transform(w)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\"\n\n# Fitting the Regression Model to the dataset\nfrom sklearn.ensemble import RandomForestRegressor\nregressor = RandomForestRegressor(n_estimators=300,random_state=0)\nregressor.fit(X,y)\n\n# Predicting a new result\ny_pred = regressor.predict(w)\n\nfilename = 'model.pkl'\npickle.dump(regressor, open(filename, 'wb'))","sub_path":"DenguePredictor.py","file_name":"DenguePredictor.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"67244461","text":"import sys\r\nimport socket\r\nimport time\r\nimport threading \r\nimport queue\r\nimport random\r\nimport select\r\nfrom buffer import Buffer\r\nfrom pacote import Pacote\r\n\r\nclass Cliente:\r\n\tdef __init__(self, log, hostPort, tamanhoJanela, temporizador, pError):\r\n\t\tself.nomeLog = log\r\n\t\tself.host = hostPort[0]\r\n\t\tself.port = int(hostPort[1])\r\n\t\tself.temporizador = int(temporizador)\r\n\t\tself.pError = float(pError)\r\n\r\n\t\tself.sock = self.iniciaSock()\r\n\t\tself.janela = Buffer(int(tamanhoJanela)) \r\n\t\tself.filaDeEspera = queue.Queue(maxsize = int(tamanhoJanela)) #fila de espera da janela\r\n\r\n\t\tself.logsTransmitidos = 0\r\n\t\tself.logsDistintosTransmitidos = 0\r\n\t\tself.logsIncorretosTransmitidos = 0\r\n\t\t\r\n\t\tself.enviando = True #indica se a filaDeEspera está sendo alimentada\r\n\t\tself.confirmados = False #indica se todos os pacotes foram confirmados\r\n\t\tself.permissaoSock = threading.Lock() #permissão para utilizar o sock\r\n\r\n\tdef iniciaSock(self):\r\n\t\ttry:\r\n\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,0)\r\n\t\texcept socket.error:\r\n\t\t\tprint (\"Falha ao criar socket\")\r\n\t\t\tsys.exit()\r\n\t\r\n\t\treturn sock\r\n\r\n\tdef abrirArquivo(self, arquivo):\r\n\t\tfor linha in open(arquivo):\r\n\t\t\tyield linha, \"%.20f\"%time.time()\r\n\r\n\tdef lerArquivoLog(self):\r\n\t\tidentificador = 0\r\n\t\tlinhas = self.abrirArquivo(self.nomeLog)\r\n\r\n\t\tfor linha, timesatamp in linhas:\r\n\t\t\tsegundos, nanosegundos = timesatamp.split('.')\r\n\t\t\tpacote = Pacote(identificador, segundos, nanosegundos, linha[:-1]) #linha[:-1] retira o \\n\r\n\t\t\tself.filaDeEspera.put(pacote, block = True)\r\n\t\t\tidentificador += 1\r\n\r\n\t\tself.enviando = False\r\n\r\n\t\treturn\r\n\r\n\tdef cuidarDaTransmissao(self, pacote):\r\n\t\twhile not pacote.confirmado and not self.confirmados:\r\n\t\t\terro = random.random()\r\n\r\n\t\t\tif erro > 
self.pError:\r\n\t\t\t\tself.sock.sendto(pacote.pacoteParaRede(erro=False), (self.host, self.port))\r\n\t\t\telse:\r\n\t\t\t\tself.sock.sendto(pacote.pacoteParaRede(erro=True), (self.host, self.port))\r\n\t\t\t\tself.logsIncorretosTransmitidos += 1\r\n\r\n\t\t\tself.logsTransmitidos += 1\r\n\t\t\ttime.sleep(self.temporizador)\r\n\r\n\t\treturn\r\n\r\n\tdef confirmarPacote(self, identificador):\r\n\t\tfor pacote in self.janela.dados:\t\r\n\t\t\tif not(pacote == None) and pacote.identificador == identificador:\r\n\t\t\t\tpacote.confirmado = True\r\n\t\t\t\tbreak\r\n\r\n\tdef escutarServidor(self):\r\n\t\tdados = None\r\n\t\twhile not self.confirmados:\r\n\t\t\tentrada = None\r\n\t\t\tentrada, saida, excecao = select.select([self.sock], [], [], 10)\r\n\r\n\t\t\tif entrada:\r\n\t\t\t\tdados, _ = self.sock.recvfrom(36) #36 é o tamanho do cabeçalho\r\n\t\t\t\tpacoteRecebido, md5Recebido = Pacote.redeParaPacote(dados, texto = False)\r\n\t\t\t\tif pacoteRecebido.verificaMD5(md5Recebido):\r\n\t\t\t\t\tself.confirmarPacote(pacoteRecebido.identificador)\r\n\r\n\t\treturn\r\n\r\n\tdef enviarPacotes(self):\r\n\t\tnovoItens = True\r\n\t\tself.janela.insere(self.filaDeEspera.get(block = True)) #primeiro\r\n\r\n\t\touvinte = threading.Thread(target = self.escutarServidor)\r\n\t\touvinte.start()\r\n\r\n\t\twhile self.janela.contemItens() or self.enviando :\r\n\t\t\twhile self.janela.temEspaco() and novoItens:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.janela.insere(self.filaDeEspera.get(block = True, timeout = 5))\r\n\t\t\t\texcept queue.Empty:\r\n\t\t\t\t\tif not self.enviando:\r\n\t\t\t\t\t\tnovoItens = False\r\n\r\n\t\t\tfor pacote in self.janela.dados:\r\n\t\t\t\tif not(pacote == None) and not pacote.enviado:\r\n\t\t\t\t\tself.logsDistintosTransmitidos += 1\r\n\t\t\t\t\tpacote.enviado = True\r\n\t\t\t\t\tenvio = threading.Thread(target = self.cuidarDaTransmissao, \r\n\t\t\t\t\t\targs = [pacote])\r\n\r\n\t\t\t\t\tenvio.start()\r\n\r\n\t\t\tfor posicao in range 
(len(self.janela.dados)):\r\n\t\t\t\tif not(self.janela.dados[posicao] == None) and self.janela.dados[posicao].confirmado:\r\n\t\t\t\t\tself.janela.liberaEspaco(posicao)\r\n\r\n\t\tself.confirmados = True #parametro que mantém o ouvinte\r\n\t\touvinte.join()\r\n\r\n\t\treturn\r\n\r\n\tdef __str__(self):\r\n\t\treturn f'''nome do arquivo de log : {self.nomeLog}\r\n\t\t\\rHOST: {self.host}\r\n\t\t\\rPORT: {self.port}\r\n\t\t\\rtamanho da janela deslizante: {self.janela.tamanho}\r\n\t\t\\rtemporizador: {self.temporizador}\r\n\t\t\\rprobabilidade de erro no MD5: {self.pError}'''\r\n\r\nif __name__ == '__main__':\r\n\tif len(sys.argv) < 6:\r\n\t\tprint('Inicialização incorreta:')\r\n\t\tsys.exit()\r\n\r\n\ttempoInicial = time.time()\r\n\tcliente = Cliente(sys.argv[1], sys.argv[2].split(':'), sys.argv[3], sys.argv[4], sys.argv[5])\r\n\r\n\tthreadLendoLog = threading.Thread(target = cliente.lerArquivoLog)\r\n\tthreadEnviandoPacotes = threading.Thread(target = cliente.enviarPacotes)\r\n\r\n\tthreadLendoLog.start()\r\n\tthreadEnviandoPacotes.start()\r\n\r\n\tthreadLendoLog.join()\r\n\tthreadEnviandoPacotes.join()\r\n\t\r\n\ttempoDeExecucao = '%.3f'%(time.time() - tempoInicial)\r\n\tprint(f'{cliente.logsDistintosTransmitidos} {cliente.logsTransmitidos} {cliente.logsIncorretosTransmitidos} {tempoDeExecucao}')","sub_path":"cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"312065343","text":"import torch.nn.functional as F\nimport torch.nn as nn\nimport torch\n\n\"\"\"\n This script define the structure of discriminator\n According to the original Re-cycle GAN paper, \n the structure of discriminator is 70x70 PatchGAN\n And also it is also used in original CycleGAN official implementation\n Thus we borrow the implementation from: https://github.com/aitorzip/PyTorch-CycleGAN/blob/master/models.py\n\"\"\"\n\nclass Discriminator(nn.Module):\n def __init__(self, n_in = 3, r = 1):\n super().__init__()\n\n # A bunch of convolutions one after another\n model = [ nn.Conv2d(n_in, 64 // r, 4, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True) ]\n\n model += [ nn.Conv2d(64 // r, 128 // r, 4, stride=2, padding=1),\n nn.InstanceNorm2d(128 // r), \n nn.LeakyReLU(0.2, inplace=True) ]\n\n model += [ nn.Conv2d(128 // r, 256 // r, 4, stride=2, padding=1),\n nn.InstanceNorm2d(256 // r), \n nn.LeakyReLU(0.2, inplace=True) ]\n\n model += [ nn.Conv2d(256 // r, 512 // r, 4, padding=1),\n nn.InstanceNorm2d(512 // r), \n nn.LeakyReLU(0.2, inplace=True) ]\n\n # FCN classification layer\n model += [nn.Conv2d(512 // r, 1, 4, padding=1)]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n x = self.model(x)\n # Average pooling and flatten\n return F.avg_pool2d(x, x.size()[2:]).view(x.size()[0], -1)","sub_path":"lib/model/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"160238373","text":"import tensorflow as tf\nimport numpy as np\nimport time\nimport pandas as pd\nimport logging\nimport os\nimport sentencepiece as spm\n#tf.enable_eager_execution()\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL\nlogging.getLogger('tensorflow').setLevel(logging.FATAL)\n\nfrom sklearn.model_selection import train_test_split\n\n\nfile1=open('english.txt','r',encoding='utf-8')\nfile2=open('gujarati.txt','r',encoding='utf-8')\n\nt1=[]\nfor i in file1.readlines():\n t1.append(i[:-1])\nt2=[]\nfor i in file2.readlines():\n t2.append(i[:-1])\n\n\nraw_data = pd.DataFrame(list(zip(t1, t2)), columns =['eng', 'guj'])\n\n# split data into train and test set\ntrain, test = train_test_split(raw_data.values, test_size=0.3, random_state = 12)\n\n# data preprocessing\nraw_data_en=list(train[:,0])\nraw_data_fr=list(train[:,1])\n\nspm.SentencePieceTrainer.train('--input=gujarati.txt --model_prefix=g --vocab_size=2000')\nsp = spm.SentencePieceProcessor()\nsp.load('g.model')\n\nspm.SentencePieceTrainer.train('--input=english.txt --model_prefix=e --vocab_size=2000')\nsp1 = spm.SentencePieceProcessor()\nsp1.load('e.model')\n\n\nfor i in range(len(raw_data_en)):\n raw_data_en[i]=\" \".join(sp1.encode_as_pieces(raw_data_en[i]))\n raw_data_fr[i]=\" \".join(sp.encode_as_pieces(raw_data_fr[i]))\n\nraw_data_fr_in = [' ' + data for data in raw_data_fr]\nraw_data_fr_out = [data + ' ' for data in raw_data_fr]\n\ntest_en=list(test[:,0])\n\ntest_fr=list(test[:,1])\n\n# tokenization\nen_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\nen_tokenizer.fit_on_texts(raw_data_en)\ndata_en = en_tokenizer.texts_to_sequences(raw_data_en)\ndata_en = tf.keras.preprocessing.sequence.pad_sequences(data_en,padding='post')\n\nfr_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\nfr_tokenizer.fit_on_texts(raw_data_fr_in)\nfr_tokenizer.fit_on_texts(raw_data_fr_out)\ndata_fr_in = fr_tokenizer.texts_to_sequences(raw_data_fr_in)\ndata_fr_in = 
tf.keras.preprocessing.sequence.pad_sequences(data_fr_in,padding='post')\n\ndata_fr_out = fr_tokenizer.texts_to_sequences(raw_data_fr_out)\ndata_fr_out = tf.keras.preprocessing.sequence.pad_sequences(data_fr_out,padding='post')\n\nBATCH_SIZE = 32\ndataset = tf.data.Dataset.from_tensor_slices(\n (data_en, data_fr_in, data_fr_out))\ndataset = dataset.shuffle(20).batch(BATCH_SIZE)\n\n\n# positional embedding\ndef positional_embedding(pos, model_size):\n PE = np.zeros((1, model_size))\n for i in range(model_size):\n if i % 2 == 0:\n PE[:, i] = np.sin(pos / 10000 ** (i / model_size))\n else:\n PE[:, i] = np.cos(pos / 10000 ** ((i - 1) / model_size))\n return PE\n\nmax_length = max(len(data_en[0]), len(data_fr_in[0]))\nMODEL_SIZE = 512\n\npes = []\nfor i in range(max_length):\n pes.append(positional_embedding(i, MODEL_SIZE))\n\npes = np.concatenate(pes, axis=0)\npes = tf.constant(pes, dtype=tf.float32)\n\n\n# Multi-head attention\nclass MultiHeadAttention(tf.keras.Model):\n def __init__(self, model_size, h):\n super(MultiHeadAttention, self).__init__()\n self.query_size = model_size // h\n self.key_size = model_size // h\n self.value_size = model_size // h\n self.h = h\n self.wq = [tf.keras.layers.Dense(self.query_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(h)]\n self.wk = [tf.keras.layers.Dense(self.key_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(h)]\n self.wv = [tf.keras.layers.Dense(self.value_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(h)]\n self.wo = tf.keras.layers.Dense(model_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01))\n\n def call(self, decoder_output, encoder_output):\n # decoder_output has shape (batch, decoder_len, model_size)\n # encoder_output has shape (batch, encoder_len, model_size)\n heads = []\n for i in range(self.h):\n score = tf.matmul(self.wq[i](decoder_output), self.wk[i](encoder_output), transpose_b=True) / 
tf.math.sqrt(tf.dtypes.cast(self.key_size, tf.float32))\n # score has shape (batch, decoder_len, encoder_len)\n alignment = tf.nn.softmax(score, axis=2)\n # alignment has shape (batch, decoder_len, encoder_len)\n head = tf.matmul(alignment, self.wv[i](encoder_output))\n # head has shape (batch, decoder_len, value_size)\n heads.append(head)\n heads = tf.concat(heads, axis=2)\n heads = self.wo(heads)\n # heads has shape (batch, decoder_len, model_size)\n return heads\n \n\n# Encoder\nclass Encoder(tf.keras.Model):\n def __init__(self, vocab_size, model_size, num_layers, h):\n super(Encoder, self).__init__()\n self.model_size = model_size\n self.num_layers = num_layers\n self.h = h\n self.embedding = tf.keras.layers.Embedding(vocab_size, model_size,embeddings_regularizer=tf.keras.regularizers.L2(l2=0.01))\n self.attention = [MultiHeadAttention(model_size, h) for _ in range(num_layers)]\n \n self.attention_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n \n self.dense_1 = [tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(num_layers)]\n self.dense_2 = [tf.keras.layers.Dense(model_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(num_layers)]\n self.ffn_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n \n def call(self, sequence):\n sub_in = []\n for i in range(sequence.shape[1]):\n embed = self.embedding(tf.expand_dims(sequence[:, i], axis=1))\n sub_in.append(embed + pes[i, :])\n \n sub_in = tf.concat(sub_in, axis=1)\n \n for i in range(self.num_layers):\n sub_out = []\n for j in range(sub_in.shape[1]):\n attention = self.attention[i](\n tf.expand_dims(sub_in[:, j, :], axis=1), sub_in)\n\n sub_out.append(attention)\n\n sub_out = tf.concat(sub_out, axis=1)\n sub_out = sub_in + sub_out\n sub_out = self.attention_norm[i](sub_out)\n \n ffn_in = sub_out\n\n ffn_out = self.dense_2[i](self.dense_1[i](ffn_in))\n ffn_out = ffn_in + 
ffn_out\n ffn_out = self.ffn_norm[i](ffn_out)\n\n sub_in = ffn_out\n \n return ffn_out\n\n \nclass Decoder(tf.keras.Model):\n def __init__(self, vocab_size, model_size, num_layers, h):\n super(Decoder, self).__init__()\n self.model_size = model_size\n self.num_layers = num_layers\n self.h = h\n self.embedding = tf.keras.layers.Embedding(vocab_size, model_size,embeddings_regularizer=tf.keras.regularizers.L2(l2=0.01))\n self.attention_bot = [MultiHeadAttention(model_size, h) for _ in range(num_layers)]\n self.attention_bot_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n self.attention_mid = [MultiHeadAttention(model_size, h) for _ in range(num_layers)]\n self.attention_mid_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n \n self.dense_1 = [tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(num_layers)]\n self.dense_2 = [tf.keras.layers.Dense(model_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01)) for _ in range(num_layers)]\n self.ffn_norm = [tf.keras.layers.BatchNormalization() for _ in range(num_layers)]\n \n self.dense = tf.keras.layers.Dense(vocab_size, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01))\n \n def call(self, sequence, encoder_output):\n # EMBEDDING AND POSITIONAL EMBEDDING\n embed_out = []\n for i in range(sequence.shape[1]):\n embed = self.embedding(tf.expand_dims(sequence[:, i], axis=1))\n embed_out.append(embed + pes[i, :])\n \n embed_out = tf.concat(embed_out, axis=1)\n \n \n bot_sub_in = embed_out\n \n for i in range(self.num_layers):\n # BOTTOM MULTIHEAD SUB LAYER\n bot_sub_out = []\n \n for j in range(bot_sub_in.shape[1]):\n values = bot_sub_in[:, :j, :]\n attention = self.attention_bot[i](\n tf.expand_dims(bot_sub_in[:, j, :], axis=1), values)\n\n bot_sub_out.append(attention)\n bot_sub_out = tf.concat(bot_sub_out, axis=1)\n bot_sub_out = bot_sub_in + bot_sub_out\n bot_sub_out = 
self.attention_bot_norm[i](bot_sub_out)\n \n # MIDDLE MULTIHEAD SUB LAYER\n mid_sub_in = bot_sub_out\n\n mid_sub_out = []\n for j in range(mid_sub_in.shape[1]):\n attention = self.attention_mid[i](\n tf.expand_dims(mid_sub_in[:, j, :], axis=1), encoder_output)\n\n mid_sub_out.append(attention)\n\n mid_sub_out = tf.concat(mid_sub_out, axis=1)\n mid_sub_out = mid_sub_out + mid_sub_in\n mid_sub_out = self.attention_mid_norm[i](mid_sub_out)\n\n # FFN\n ffn_in = mid_sub_out\n\n ffn_out = self.dense_2[i](self.dense_1[i](ffn_in))\n ffn_out = ffn_out + ffn_in\n ffn_out = self.ffn_norm[i](ffn_out)\n\n bot_sub_in = ffn_out\n \n logits = self.dense(ffn_out)\n \n return logits\n \nH = 2\nNUM_LAYERS = 2\n\nen_vocab_size = len(en_tokenizer.word_index) + 1\nencoder = Encoder(en_vocab_size, MODEL_SIZE, NUM_LAYERS, H)\n\n\nprint('Input vocabulary size', en_vocab_size)\n\nfr_vocab_size = len(fr_tokenizer.word_index) + 1\nmax_len_fr = data_fr_in.shape[1]\ndecoder = Decoder(fr_vocab_size, MODEL_SIZE, NUM_LAYERS, H)\n\n\nprint('Target vocabulary size', fr_vocab_size)\n\n\n\ncrossentropy = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True)\ndef loss_func(targets, logits):\n mask = tf.math.logical_not(tf.math.equal(targets, 0))\n mask = tf.cast(mask, dtype=tf.int64)\n loss = crossentropy(targets, logits, sample_weight=mask)\n\n return loss\n\n\noptimizer = tf.keras.optimizers.Adam()\n\ndef predict(test_source_text=None):\n if test_source_text is None:\n test_source_text = raw_data_en[np.random.choice(len(raw_data_en))]\n #print(test_source_text)\n test_source_seq = en_tokenizer.texts_to_sequences([test_source_text])\n #print(test_source_seq)\n\n en_output = encoder(tf.constant(test_source_seq))\n\n de_input = tf.constant([[fr_tokenizer.word_index['']]], dtype=tf.int64)\n\n out_words = []\n\n while True:\n de_output = decoder(de_input, en_output)\n new_word = tf.expand_dims(tf.argmax(de_output, -1)[:, -1], axis=1)\n 
out_words.append(fr_tokenizer.index_word[new_word.numpy()[0][0]])\n\n de_input = tf.concat((de_input, new_word), axis=-1)\n\n if out_words[-1] == '' or len(out_words) >= 14:\n break\n\n return ' '.join(out_words)\n\n\n@tf.function\ndef train_step(source_seq, target_seq_in, target_seq_out):\n with tf.GradientTape() as tape:\n encoder_output = encoder(source_seq)\n \n decoder_output = decoder(target_seq_in, encoder_output)\n\n loss = loss_func(target_seq_out, decoder_output)\n\n variables = encoder.trainable_variables + decoder.trainable_variables\n gradients = tape.gradient(loss, variables)\n capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in zip(gradients,variables)]\n optimizer.apply_gradients(capped_gvs)\n #optimizer.apply_gradients(zip(gradients, variables))\n\n return loss\n\nNUM_EPOCHS = 1\n\nstart_time = time.time()\nfor e in range(NUM_EPOCHS):\n for batch, (source_seq, target_seq_in, target_seq_out) in enumerate(dataset.take(-1)):\n loss = train_step(source_seq, target_seq_in,\n target_seq_out)\n\n print('Epoch {} Loss {:.4f}'.format(\n e + 1, loss.numpy()))\n\nend_time = time.time()\nprint('Average elapsed time: {:.2f}s'.format((end_time - start_time) / (e + 1)))\n\n \nimport nltk\n\nbleu_sum=0\ncount=0\nfor i in range(len(test_en)):\n test_sequence=test_en[i]\n try:\n op=predict(test_sequence)\n except:\n count+=1\n continue\n op=sp.decode_pieces(op.split(' '))\n if i%1000==0:\n print(test_en[i])\n print(test_fr[i])\n print(op,'\\n')\n BLEU = nltk.translate.bleu_score.sentence_bleu([test_fr[i]], op ,weights = (0.5, 0.5))\n bleu_sum+= BLEU\n\nprint(\"BLEU Score :\",(bleu_sum/(len(test_en)-count))*100)","sub_path":"final_subword.py","file_name":"final_subword.py","file_ext":"py","file_size_in_byte":12614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"503511819","text":"#!/usr/bin/python3.7\n\nimport twitter\nimport json\nfrom twitterConfig import TwitterConfig\nfrom Sink import Sink\n\nclass Tweets(): \n\n def __init__(self): \n self.config = TwitterConfig() \n self.api = twitter.Api(consumer_key=self.config.consumer_key, consumer_secret=self.config.consumer_secret,\n access_token_key=self.config.access_key, access_token_secret=self.config.access_secret,\n input_encoding=None)\n self.sink = Sink(host=\"redis\", port=\"6357\")\n\n def getTweets(self):\n stream = self.api.GetStreamFilter(track=['@realDonaldTrump'])\n try:\n for line in stream:\n print(json.dumps(line))\n self.sink.write(json.dumps(line))\n finally:\n stream.close()\n \ndef main():\n tweet = Tweets()\n tweet.getTweets()\n\nif __name__ == \"__main__\":\n main()","sub_path":"Tweets.py","file_name":"Tweets.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"564494455","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\" Main script for connecting to a MQTT queue \"\"\"\n\nimport paho.mqtt.client\nimport json\n\ndef on_message(client, userdata, msg, ):\n print(\"{} Payload -> {}\".format(msg.topic, msg.payload.decode()))\n # client.publish('output', msg.payload.decode())\n\n\ndef on_publish(client, userdata, messageId):\n print(\"MessageID: \"+str(messageId))\n\n\ndef on_subscribe(client, userdata, messageId, granted_qos):\n print(\"Subscribed: \"+str(messageId)+\" \"+str(granted_qos))\n\n\ndef on_log(client, userdata, level, string):\n print(string)\n\n\nif __name__ == \"__main__\":\n\n # read config file\n with open('config.json', 'r') as filePointer:\n cfg = json.load(filePointer)\n\n client = paho.mqtt.client.Client()\n client.username_pw_set(cfg['mqtt']['user'], cfg['mqtt']['pass'])\n client.on_message = on_message\n client.on_publish = on_publish\n client.on_subscribe = on_subscribe\n\n try:\n client.connect(cfg['mqtt']['url'], int(cfg['mqtt']['port']), 60)\n\n for topic in cfg['topics'].values():\n print(topic)\n client.subscribe(topic, 0)\n\n client.loop_forever()\n\n except KeyboardInterrupt:\n print(\"\\nMQTT Ended\")\n\n finally:\n client.disconnect()","sub_path":"mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"641701183","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 18 13:56:59 2017\n\n@author: Mauro\n\"\"\"\n\nfrom LanguageSupport import _\n\nclass TestLS:\n \n def __init__(self):\n \n self.mypoints = 10\n \n \n def showPoints(self):\n \n sdb = {}\n sdb[\"points\"] = self.mypoints\n \n msg = \"My points = {points}\"\n \n msg = _(msg, \"it-IT\")\n \n msg = msg.format(**sdb)\n \n return msg","sub_path":"src/test_ls.py","file_name":"test_ls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
import configparser
import csv
import json
import os
import re
import sys

import pycountry

import xlsxwriter

# NOTE(review): the Selenium/requests-based ClickFunnels export-download
# steps that used to live here (and inside the funnel loop) were dead,
# commented-out code and have been removed; the script expects the
# "<funnel>_sales.csv" exports to already exist next to the config file.


# --- configuration ----------------------------------------------------------
pardir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
config = configparser.ConfigParser()
config.read(pardir + "\\Step_1_config.txt")

# Country name -> ISO alpha-2 code, from pycountry plus config overrides.
countryCodes = {}
for country in pycountry.countries:
    countryCodes[country.name.upper()] = country.alpha_2
for key in config["Additional countries"]:
    countryCodes[str(key).upper()] = (
        config["Additional countries"][str(key).upper()]
    ).upper()


SG_LABELS_WB = xlsxwriter.Workbook(pardir + "\\zz_sg_labels.xlsx")
MY_LABELS_WB = xlsxwriter.Workbook(pardir + "\\zz_my_labels.xlsx")
SG_LABELS_WS = SG_LABELS_WB.add_worksheet()
MY_LABELS_WS = MY_LABELS_WB.add_worksheet()
SG_LABELS_index = 0
MY_LABELS_index = 0

funnels = config.sections()
funnels.remove("Additional countries")
for funnel in funnels:
    excludedProducts = json.loads(config[funnel]["excludedProducts"])
    bookCode = config[funnel]["bookCode"]
    startingNumber = int(config[funnel]["startingNumber"])
    lastNameProcessed = config[funnel]["lastNameProcessed"]
    lastCountryProcessed = config[funnel]["lastCountryProcessed"]

    fileName = "\\" + funnel + "_sales.csv"
    fileSrc = pardir + fileName

    # Read the raw ClickFunnels sales export.
    rawData = []
    with open(fileSrc, newline="", encoding="utf-8") as csvfile:
        reader = csv.reader(csvfile, delimiter=",")
        for row in reader:
            rawData.append(row)

    # Drop everything up to and including the last record processed on a
    # previous run (matched on name + country; the LAST match wins).
    # NOTE(review): when no match is found this still deletes row 0 —
    # presumably the CSV header; confirm that is intentional.
    lastRecordedIndex = 0
    for i, col in enumerate(rawData):
        if (
            lastNameProcessed.upper() == col[0].strip().upper()
            and lastCountryProcessed.upper() == col[15].upper()
        ):
            lastRecordedIndex = i
    del rawData[: lastRecordedIndex + 1]

    # Nothing new to process for this funnel.
    if not rawData:
        print("NO NEW SALES FOR " + funnel + "!")
        continue

    # Remember the newest record so the next run resumes after it.
    config[funnel]["lastNameProcessed"] = rawData[-1][0].strip()
    config[funnel]["lastCountryProcessed"] = rawData[-1][15]

    # Drop rows that contain excluded products (column 17 holds the orders).
    for product in excludedProducts:
        rawData = [col for col in rawData if product not in col[17]]

    # output files
    SG_WB = xlsxwriter.Workbook(pardir + "\\" + funnel + "_sg.xlsx")
    MY_WB = xlsxwriter.Workbook(pardir + "\\" + funnel + "_my.xlsx")
    PH_WB = xlsxwriter.Workbook(pardir + "\\" + funnel + "_ph.xlsx")
    OTHERS_WB = xlsxwriter.Workbook(pardir + "\\" + funnel + "_others.xlsx")
    DHL_WB = xlsxwriter.Workbook(pardir + "\\" + funnel + "_DHL.xlsx")
    SG_WS = SG_WB.add_worksheet()
    MY_WS = MY_WB.add_worksheet()
    PH_WS = PH_WB.add_worksheet()
    OTHERS_WS = OTHERS_WB.add_worksheet()
    DHL_WS = DHL_WB.add_worksheet()

    # Merge orders going to the same address (column 11).  The original
    # implementation rescanned the whole list from scratch after every single
    # merge (O(n^3) worst case).  This single pass is equivalent: the first
    # occurrence keeps its row, the duplicates' order columns are appended in
    # REVERSE order of appearance (exactly what the restart-based loop
    # produced), and the duplicate rows are dropped.
    first_row_for = {}
    duplicate_orders = {}
    deduped = []
    for col in rawData:
        address = col[11]
        if address in first_row_for:
            duplicate_orders.setdefault(address, []).append(col[17])
        else:
            first_row_for[address] = col
            deduped.append(col)
    for address, orders in duplicate_orders.items():
        for order in reversed(orders):
            first_row_for[address][17] += "," + order
    rawData = deduped

    # Column headers shared by the per-country sheets and the label sheets.
    for i, header in enumerate(
        [
            "NAME",
            "EMAIL",
            "TRACKING NO.",
            "PHONE",
            "ADDRESS",
            "CITY",
            "STATE",
            "POSCODE",
            "COUNTRY",
            "ORDERS",
            "STATUS",
            "COST",
        ]
    ):
        SG_WS.write(0, i, header)
        MY_WS.write(0, i, header)
        PH_WS.write(0, i, header)
        OTHERS_WS.write(0, i, header)
        SG_LABELS_WS.write(0, i, header)
        MY_LABELS_WS.write(0, i, header)

    for i, header in enumerate(
        [
            "Pick-up Account Number",
            "Shipment Order ID",
            "Shipping Service Code",
            "Consignee Name",
            "Address Line 1",
            "Address Line 2",
            "Address Line 3",
            "City",
            "State (M)",
            "Postal Code (M)",
            "Destination Country Code",
            "Phone Number",
            "Email Address",
            "Shipment Weight (g)",
            "Currency Code",
            "Total Declared Value",
            "Incoterm",
            "Shipment Description",
            "Content Description",
            "Content Export Description",
            "Content Unit Price",
            "Content Origin Country",
            "Content Quantity",
            "Content Code",
            "Content Indicator",
            "Remarks",
        ]
    ):
        DHL_WS.write(0, i, header)

    SGindex = 1
    MYindex = 1
    PHindex = 1
    OTHERSindex = 1
    DHLindex = 1
    wsOut = ""
    indexOut = ""
    # Route each order to the right country workbook and build the DHL
    # manifest row at the same time.
    for col in rawData:
        shipmentOrderID = ""
        destination = col[15].lower()
        if destination in ("singapore", "sg"):
            indexOut = SGindex
            SGindex += 1
            SG_LABELS_index += 1
            wsOut = SG_WS
        elif destination in (
            "malaysia", "hong kong", "canada", "iran", "my", "hk", "ca", "ir"
        ):
            indexOut = MYindex
            MYindex += 1
            MY_LABELS_index += 1
            wsOut = MY_WS
        elif destination in ("philippines", "ph"):
            indexOut = PHindex
            PHindex += 1
            wsOut = PH_WS
        else:
            indexOut = OTHERSindex
            OTHERSindex += 1
            wsOut = OTHERS_WS

        # Total book quantity: each order item looks like ",<qty> X <title>".
        quantityTotal = 0
        tmpOrders = "," + col[17]
        commas = [m.start() for m in re.finditer(",", tmpOrders)]
        Xs = [m.start() for m in re.finditer(" X ", tmpOrders)]
        for startQuantity, endQuantity in zip(commas, Xs):
            quantityTotal += int(tmpOrders[startQuantity + 1 : endQuantity])

        # The 28000 book weighs more, so count it separately.
        quantity28000 = 0
        if " X 28000 Book" in tmpOrders:
            endIndex = [m.start() for m in re.finditer(" X 28000", tmpOrders)]
            for i in endIndex:
                startIndex = tmpOrders[:i].rindex(",") + 1
                quantity28000 += int(tmpOrders[startIndex:i])

        # NOTE(review): the DHL destination comes from col[8] while the sheet
        # routing above uses col[15] — confirm against the CSV schema.
        if len(col[8]) == 2:
            countryCode = col[8].upper()
        else:
            countryCode = countryCodes.get(col[8].upper())
            if countryCode is None:
                print(
                    "Country code not found for:", col[8].upper()
                )

        if countryCode in ["SG", "TH", "AU", "GB"]:
            shippingServiceCode = "PLT"
            incoterm = "DDP"
        elif countryCode == "US":
            shippingServiceCode = "PLE"
            incoterm = "DDP"
        else:
            shippingServiceCode = "PPS"
            incoterm = "DDU"

        shipmentOrderID = bookCode + str(startingNumber)

        # All books(250g) except 28000 book(425g)
        weight = (quantityTotal - quantity28000) * 250 + (quantity28000 * 425)

        # RM10 per book, max RM50
        declaredValue = min(quantityTotal * 10, 50)

        for k, content in enumerate(
            [
                "5345221",
                shipmentOrderID,
                shippingServiceCode,
                col[0][:30],  # name is MAX 30 CHARS
                col[11],
                "",
                "",
                col[6],
                col[7],
                col[9],
                countryCode,
                col[10],
                col[3],
                weight,
                "MYR",
                declaredValue,
                incoterm,
                "educational book, perfect bound book",
                "educational book, perfect bound book",
                "",
                declaredValue,
                "MY",
                1,
                shipmentOrderID,
                "",
                "",
            ]
        ):
            DHL_WS.write(DHLindex, k, content)
        DHLindex += 1
        startingNumber += 1

        for j, content in enumerate(
            [
                col[0],
                col[3],
                shipmentOrderID,
                col[10],
                col[11],
                col[13],
                col[14],
                col[16],
                col[15],
                col[17],
                "",
                "",
            ]
        ):
            wsOut.write(indexOut, j, content)
            if wsOut == SG_WS:
                SG_LABELS_WS.write(SG_LABELS_index, j, content)
            elif wsOut == MY_WS:
                MY_LABELS_WS.write(MY_LABELS_index, j, content)

    config[funnel]["startingnumber"] = str(startingNumber)

    # cleanup
    SG_WB.close()
    MY_WB.close()
    PH_WB.close()
    OTHERS_WB.close()
    DHL_WB.close()
    print("Output for " + funnel + " complete!")

SG_LABELS_WB.close()
MY_LABELS_WB.close()

with open(pardir + "\\Step_1_config.txt", "w") as configfile:
    config.write(configfile)
+{"seq_id":"527581709","text":"from timer import Timer\r\nfrom paths import get_path_data\r\nfrom io_util import IOUtil\r\n\r\ndef main(sellerApi, category):\r\n timer = Timer()\r\n \r\n timer.start()\r\n if category['id'] is None:\r\n print(\"Top offers...\")\r\n offers = sellerApi.get_top_offers()\r\n else:\r\n offers = sellerApi.get_offers(category)\r\n print(\"Time elapsed to get {0} produts: \".format(len(offers)), timer.diff())\r\n \r\n timer.start()\r\n adds, deletes, updates = sellerApi.process_offers(offers, category)\r\n print(\"Time elapsed to process the products: \", timer.diff())\r\n\r\n print(\"Produts to add: \", len(adds))\r\n print(\"Produts to update: \", len(updates))\r\n print(\"Produts to delete: \", len(deletes))\r\n\r\n # Salvando os ultimos produtos enviados para o csv\r\n IOUtil.dic_to_json( 'to_add.json', category['id'], adds )\r\n IOUtil.dic_to_json( 'to_update.json', category['id'], updates )\r\n IOUtil.dic_to_json( 'to_delete.json', category['id'], deletes )\r\n\r\n # IOUtil.dic_to_csv( 'to_add.csv', category['id'], adds )\r\n # IOUtil.dic_to_csv( 'to_update.csv', category['id'], updates )\r\n # IOUtil.dic_to_csv( 'to_delete.csv', category['id'], deletes )","sub_path":"api_c_process.py","file_name":"api_c_process.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"607864998","text":"#coding=utf8\n#######################################################\n#filename: TestCase.py\n#author: defias\n#date: 2016-3\n#function: TEST CASE\n#######################################################\nimport unittest,time,os,sys\nsys.path.append(\"..\")\nfrom PageObject import IndexPage\nfrom Login import Login\n\nclass TestLogin(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.LoginO = Login()\n cls.LoginO.open() #打开页面\n\n def setUp(self):\n unittest.TestCase.setUp(self)\n self.verificationErrors = []\n self.accept_next_alert = True\n\n def tearDown(self):\n unittest.TestCase.tearDown(self)\n self.assertEqual(self.verificationErrors, [])\n\n @classmethod\n def tearDownClass(cls):\n cls.LoginO.close() #关闭页面\n\n def test_login_success_001(self):\n '''正常登录登出'''\n po = IndexPage.IndexPage()\n po.InputUser('root')\n po.InputPasswd('root123')\n po.CkLoginButton()\n self.assertEqual(u'您好, 超级管理员 ', po.LoginSucessCheck())\n po.Logout()\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n","sub_path":"src/TestCaseLib/TestLogin.py","file_name":"TestLogin.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
import hashlib
import logging

from PIL import Image, ImageColor, ImageDraw, ImageFilter

from django.apps import apps
from django.utils.encoding import force_bytes, force_text
from django.utils.text import format_lazy
from django.utils.translation import ugettext_lazy as _

from .layers import layer_decorations, layer_saved_transformations

logger = logging.getLogger(name=__name__)


def _parse_int(value, default=0):
    """Coerce a user-supplied transformation argument to int.

    Falsy values (None, '') and unparsable strings both fall back to
    *default*, mirroring the historical ``int(value or '0')`` idiom that was
    duplicated throughout this module.
    """
    try:
        return int(value or str(default))
    except ValueError:
        return default


def _parse_float(value, default=0.0):
    """Float counterpart of :func:`_parse_int`."""
    try:
        return float(value or str(default))
    except ValueError:
        return default


def _clamp(value, lower, upper):
    """Confine *value* to the closed interval [lower, upper]."""
    return max(lower, min(value, upper))


def _margins_to_box(size, left, top, right, bottom):
    """Convert border margins into an absolute Pillow (L, T, R, B) viewport.

    Each coordinate is clamped to the image.  ``right`` and ``bottom`` are
    distances from the right/bottom edges, so callers can trim borders
    without knowing the real image dimensions; the result is guaranteed to
    be a non-empty box.
    """
    width, height = size
    left = _clamp(left, 0, width - 1)
    top = _clamp(top, 0, height - 1)
    right = _clamp(right, 0, width - 1)
    bottom = _clamp(bottom, 0, height - 1)

    # Invert the right and bottom margins into absolute coordinates.
    right = width - right
    bottom = height - bottom

    if left > right:
        left = right - 1

    if top > bottom:
        top = bottom - 1

    return left, top, right, bottom


def _rectangle_style(transformation):
    """Resolve fill color, outline color and outline width arguments."""
    fillcolor_value = getattr(transformation, 'fillcolor', None)
    fill_color = ImageColor.getrgb(fillcolor_value) if fillcolor_value else 0

    outlinecolor_value = getattr(transformation, 'outlinecolor', None)
    outline_color = (
        ImageColor.getrgb(outlinecolor_value) if outlinecolor_value else None
    )

    outlinewidth_value = getattr(transformation, 'outlinewidth', None)
    outline_width = int(outlinewidth_value) if outlinewidth_value else 0

    return fill_color, outline_color, outline_width


class BaseTransformationType(type):
    def __str__(self):
        return force_text(s=self.label)


class BaseTransformation(metaclass=BaseTransformationType):
    """
    Transformation can modify the appearance of the document's page preview.
    Some transformation available are: Rotate, zoom, resize and crop.
    """
    arguments = ()
    name = 'base_transformation'
    _layer_transformations = {}
    _registry = {}

    @staticmethod
    def combine(transformations):
        """Fold the cache hashes of *transformations* into one hex digest."""
        # NOTE(review): raises AttributeError when *transformations* is empty
        # (``result`` stays None); callers appear to always pass at least one
        # transformation — confirm before hardening.
        result = None

        for transformation in transformations:
            if not result:
                result = hashlib.sha256(transformation.cache_hash())
            else:
                result.update(transformation.cache_hash())

        return result.hexdigest()

    @classmethod
    def get(cls, name):
        """Return the registered transformation class called *name*."""
        return cls._registry[name]

    @classmethod
    def get_arguments(cls):
        return cls.arguments

    @classmethod
    def get_assigned_layer(cls):
        """Return the layer this transformation class was registered under."""
        for layer, transformations in cls._layer_transformations.items():
            if cls in transformations:
                return layer

    @classmethod
    def get_label(cls):
        """Label plus a comma separated list of argument names, if any."""
        arguments = cls.get_arguments()
        if arguments:
            return format_lazy('{}: {}', cls.label, ', '.join(arguments))
        else:
            return cls.label

    @classmethod
    def get_transformation_choices(cls, group_by_layer=False, layer=None):
        """Return (name, label) choices, optionally filtered/grouped by layer."""
        if layer:
            transformation_list = [
                (transformation.name, transformation) for transformation in cls._layer_transformations.get(layer, ())
            ]
        else:
            transformation_list = cls._registry.items()

        if group_by_layer:
            flat_transformation_list = [
                klass for name, klass in transformation_list
            ]

            result = {}
            for layer, transformations in cls._layer_transformations.items():
                for transformation in transformations:
                    if transformation in flat_transformation_list:
                        result.setdefault(layer, [])
                        result[layer].append(
                            (transformation.name, transformation.get_label())
                        )

            result = [
                (layer.label, transformations) for layer, transformations in result.items()
            ]

            # Sort by transformation group, then each transformation in the
            # group.
            return sorted(result, key=lambda x: (x[0], x[1]))
        else:
            return sorted(
                [
                    (name, klass.get_label()) for name, klass in transformation_list
                ]
            )

    @classmethod
    def register(cls, layer, transformation):
        """Register *transformation* globally and under *layer*."""
        cls._registry[transformation.name] = transformation
        cls._layer_transformations.setdefault(layer, set())
        cls._layer_transformations[layer].add(transformation)

    def __init__(self, **kwargs):
        # Store each declared argument both as an attribute and in
        # self.kwargs (the latter feeds cache_hash()).
        self.kwargs = {}
        for argument_name in self.__class__.get_arguments():
            setattr(self, argument_name, kwargs.get(argument_name))
            self.kwargs[argument_name] = kwargs.get(argument_name)

    def cache_hash(self):
        """Deterministic digest of the transformation name and arguments."""
        result = hashlib.sha256(force_bytes(s=self.name))

        # Sort arguments for guaranteed repeatability
        for key, value in sorted(self.kwargs.items()):
            result.update(force_bytes(s=key))
            result.update(force_bytes(s=value))

        return force_bytes(s=result.hexdigest())

    def execute_on(self, image):
        """Bind *image* for subclasses; they return the transformed image."""
        self.image = image
        self.aspect = 1.0 * image.size[0] / image.size[1]


class AssertTransformationMixin:
    # NOTE(review): the class name reads like a typo for
    # "AssetTransformationMixin"; it is kept for backwards compatibility.

    @classmethod
    def get_arguments(cls):
        return super().get_arguments() + (
            'asset_name', 'rotation', 'transparency', 'zoom'
        )

    def get_asset_images(self, asset_name):
        """Load the named asset, apply rotation/zoom, and build a paste mask.

        Returns a dict with 'image_asset' (RGBA image) and 'paste_mask'
        (alpha channel scaled by the transparency argument).  Raises
        Asset.DoesNotExist when the asset is unknown.
        """
        transparency = _clamp(_parse_float(self.transparency, 100.0), 0, 100)
        rotation = _parse_int(self.rotation) % 360
        zoom = _parse_float(self.zoom, 100.0)

        Asset = apps.get_model(app_label='converter', model_name='Asset')

        try:
            asset = Asset.objects.get(internal_name=asset_name)
        except Asset.DoesNotExist:
            logger.error('Asset "%s" not found.', asset_name)
            raise

        image_asset = asset.get_image()

        if image_asset.mode != 'RGBA':
            image_asset.putalpha(alpha=255)

        image_asset = image_asset.rotate(
            angle=360 - rotation, resample=Image.BICUBIC,
            expand=True
        )

        if zoom != 100.0:
            decimal_value = zoom / 100.0
            image_asset = image_asset.resize(
                (
                    int(image_asset.size[0] * decimal_value),
                    int(image_asset.size[1] * decimal_value)
                ), Image.ANTIALIAS
            )

        # Scale the alpha channel so the paste honours the transparency.
        paste_mask = image_asset.getchannel(channel='A').point(
            lambda i: i * transparency / 100.0
        )

        return {
            'image_asset': image_asset, 'paste_mask': paste_mask
        }


class TransformationAssetPaste(AssertTransformationMixin, BaseTransformation):
    arguments = ('left', 'top')
    label = _('Paste an asset')
    name = 'paste_asset'

    def _execute_on(self, *args, **kwargs):
        """Shared paste implementation for the absolute and percent variants.

        Honours optional align_horizontal/align_vertical attributes set by
        subclasses before delegating here.
        """
        left = _parse_int(self.left)
        top = _parse_int(self.top)

        asset_name = getattr(self, 'asset_name', None)

        if not asset_name:
            logger.error('No asset name specified.')
            return self.image

        align_horizontal = getattr(self, 'align_horizontal', 'left')
        align_vertical = getattr(self, 'align_vertical', 'top')

        result = self.get_asset_images(asset_name=asset_name)
        if result:
            if align_horizontal == 'center':
                left = int(left - result['image_asset'].size[0] / 2)
            elif align_horizontal == 'right':
                left = int(left - result['image_asset'].size[0])

            if align_vertical == 'middle':
                top = int(top - result['image_asset'].size[1] / 2)
            elif align_vertical == 'bottom':
                top = int(top - result['image_asset'].size[1])

            self.image.paste(
                im=result['image_asset'], box=(left, top),
                mask=result['paste_mask']
            )

        return self.image

    def execute_on(self, *args, **kwargs):
        super().execute_on(*args, **kwargs)
        # Fixed: the original called self._execute_on(self, ...), passing
        # *self* twice (harmless only because *args was unused).
        return self._execute_on(*args, **kwargs)


class TransformationAssetPastePercent(TransformationAssetPaste):
    label = _('Paste an asset (percents coordinates)')
    name = 'paste_asset_percent'

    def execute_on(self, *args, **kwargs):
        # Fixed: calling super().execute_on() here invoked the parent's FULL
        # execute_on(), which pasted the asset once at the raw (pre-scaled)
        # coordinates before this method pasted it again at the percent
        # position.  Skip straight to the base class to bind the image only.
        super(TransformationAssetPaste, self).execute_on(*args, **kwargs)

        left = _clamp(_parse_float(self.left), 0, 100)
        top = _clamp(_parse_float(self.top), 0, 100)

        # Convert percentages into pixel coordinates, centered on the point.
        self.left = left / 100.0 * self.image.size[0]
        self.top = top / 100.0 * self.image.size[1]
        self.align_horizontal = 'center'
        self.align_vertical = 'middle'

        return self._execute_on(*args, **kwargs)


class TransformationAssetWatermark(
    AssertTransformationMixin, BaseTransformation
):
    arguments = (
        'left', 'top', 'right', 'bottom', 'horizontal_increment',
        'vertical_increment'
    )
    label = _('Paste an asset as watermark')
    name = 'paste_asset_watermark'

    def execute_on(self, *args, **kwargs):
        super().execute_on(*args, **kwargs)

        left = _parse_int(self.left)
        top = _parse_int(self.top)
        right = _parse_int(self.right)
        bottom = _parse_int(self.bottom)

        asset_name = getattr(self, 'asset_name', None)

        if not asset_name:
            logger.error('No asset name specified.')
            return self.image

        result = self.get_asset_images(asset_name=asset_name)
        if result:
            # Zero/invalid increments default to the asset's own size so the
            # tiles abut exactly.
            horizontal_increment = (
                _parse_int(self.horizontal_increment)
                or result['paste_mask'].size[0]
            )
            vertical_increment = (
                _parse_int(self.vertical_increment)
                or result['paste_mask'].size[1]
            )

            # Tile the asset over the requested region (whole image when the
            # right/bottom bounds are 0).
            for x in range(left, right or self.image.size[0], horizontal_increment):
                for y in range(top, bottom or self.image.size[1], vertical_increment):
                    self.image.paste(
                        im=result['image_asset'], box=(x, y),
                        mask=result['paste_mask']
                    )

        return self.image


class TransformationCrop(BaseTransformation):
    arguments = ('left', 'top', 'right', 'bottom',)
    label = _('Crop')
    name = 'crop'

    def execute_on(self, *args, **kwargs):
        """Crop the image; right/bottom are margins from those edges."""
        super().execute_on(*args, **kwargs)

        left, top, right, bottom = _margins_to_box(
            self.image.size, _parse_int(self.left), _parse_int(self.top),
            _parse_int(self.right), _parse_int(self.bottom)
        )

        logger.debug(
            'left: %f, top: %f, right: %f, bottom: %f', left, top, right,
            bottom
        )

        return self.image.crop((left, top, right, bottom))


class TransformationDrawRectangle(BaseTransformation):
    arguments = (
        'left', 'top', 'right', 'bottom', 'fillcolor', 'outlinecolor',
        'outlinewidth'
    )
    label = _('Draw rectangle')
    name = 'draw_rectangle'

    def execute_on(self, *args, **kwargs):
        """Draw a rectangle; right/bottom are margins from those edges."""
        super().execute_on(*args, **kwargs)

        left, top, right, bottom = _margins_to_box(
            self.image.size, _parse_int(self.left), _parse_int(self.top),
            _parse_int(self.right), _parse_int(self.bottom)
        )

        logger.debug(
            'left: %f, top: %f, right: %f, bottom: %f', left, top, right,
            bottom
        )

        fill_color, outline_color, outline_width = _rectangle_style(self)

        draw = ImageDraw.Draw(self.image)
        draw.rectangle(
            (left, top, right, bottom), fill=fill_color, outline=outline_color,
            width=outline_width
        )

        return self.image


class TransformationDrawRectanglePercent(BaseTransformation):
    arguments = (
        'left', 'top', 'right', 'bottom', 'fillcolor', 'outlinecolor',
        'outlinewidth'
    )
    label = _('Draw rectangle (percents coordinates)')
    name = 'draw_rectangle_percent'

    def execute_on(self, *args, **kwargs):
        """Draw a rectangle with coordinates given as percentages."""
        super().execute_on(*args, **kwargs)

        left = _clamp(_parse_float(self.left), 0, 100)
        top = _clamp(_parse_float(self.top), 0, 100)
        right = _clamp(_parse_float(self.right), 0, 100)
        bottom = _clamp(_parse_float(self.bottom), 0, 100)

        logger.debug(
            'left: %f, top: %f, right: %f, bottom: %f', left, top, right,
            bottom
        )

        fill_color, outline_color, outline_width = _rectangle_style(self)

        left = left / 100.0 * self.image.size[0]
        top = top / 100.0 * self.image.size[1]

        # right/bottom are margins measured from the opposite edges.
        right = self.image.size[0] - (right / 100.0 * self.image.size[0])
        bottom = self.image.size[1] - (bottom / 100.0 * self.image.size[1])

        draw = ImageDraw.Draw(self.image)
        draw.rectangle(
            (left, top, right, bottom), fill=fill_color, outline=outline_color,
            width=outline_width
        )

        return self.image


class TransformationFlip(BaseTransformation):
    arguments = ()
    label = _('Flip')
    name = 'flip'

    def execute_on(self, *args, **kwargs):
        """Mirror the image vertically (top/bottom)."""
        super().execute_on(*args, **kwargs)

        return self.image.transpose(Image.FLIP_TOP_BOTTOM)


class TransformationGaussianBlur(BaseTransformation):
    arguments = ('radius',)
    label = _('Gaussian blur')
    name = 'gaussianblur'

    def execute_on(self, *args, **kwargs):
        """Apply a Gaussian blur with the given radius."""
        super().execute_on(*args, **kwargs)

        return self.image.filter(ImageFilter.GaussianBlur(radius=self.radius))


class TransformationLineArt(BaseTransformation):
    label = _('Line art')
    name = 'lineart'

    def execute_on(self, *args, **kwargs):
        """Threshold to pure black and white at 50% gray."""
        super().execute_on(*args, **kwargs)

        return self.image.convert('L').point(lambda x: 0 if x < 128 else 255, '1')


class TransformationMirror(BaseTransformation):
    arguments = ()
    label = _('Mirror')
    name = 'mirror'

    def execute_on(self, *args, **kwargs):
        """Mirror the image horizontally (left/right)."""
        super().execute_on(*args, **kwargs)

        return self.image.transpose(Image.FLIP_LEFT_RIGHT)


class TransformationResize(BaseTransformation):
    arguments = ('width', 'height')
    label = _('Resize')
    name = 'resize'

    def execute_on(self, *args, **kwargs):
        """Resize to width x height; height defaults to keep the aspect."""
        super().execute_on(*args, **kwargs)

        width = int(self.width)
        height = int(self.height or 1.0 * width / self.aspect)

        # Cheap power-of-two pre-shrink before the high quality pass.
        factor = 1
        while self.image.size[0] / factor > 2 * width and self.image.size[1] * 2 / factor > 2 * height:
            factor *= 2

        if factor > 1:
            self.image.thumbnail(
                (self.image.size[0] / factor, self.image.size[1] / factor),
                Image.NEAREST
            )

        # Resize the image with best quality algorithm ANTI-ALIAS
        self.image.thumbnail((width, height), Image.ANTIALIAS)

        return self.image


class TransformationRotate(BaseTransformation):
    arguments = ('degrees', 'fillcolor')
    label = _('Rotate')
    name = 'rotate'

    def execute_on(self, *args, **kwargs):
        """Rotate clockwise by *degrees*, expanding the canvas as needed."""
        super().execute_on(*args, **kwargs)

        self.degrees %= 360

        if self.degrees == 0:
            return self.image

        fillcolor_value = getattr(self, 'fillcolor', None)
        fillcolor = (
            ImageColor.getrgb(fillcolor_value) if fillcolor_value else None
        )

        return self.image.rotate(
            angle=360 - self.degrees, resample=Image.BICUBIC, expand=True,
            fillcolor=fillcolor
        )


class TransformationRotate90(TransformationRotate):
    arguments = ()
    degrees = 90
    label = _('Rotate 90 degrees')
    name = 'rotate90'

    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs['degrees'] = 90


class TransformationRotate180(TransformationRotate):
    arguments = ()
    degrees = 180
    label = _('Rotate 180 degrees')
    name = 'rotate180'

    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs['degrees'] = 180


class TransformationRotate270(TransformationRotate):
    arguments = ()
    degrees = 270
    label = _('Rotate 270 degrees')
    name = 'rotate270'

    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs['degrees'] = 270


class TransformationUnsharpMask(BaseTransformation):
    arguments = ('radius', 'percent', 'threshold')
    label = _('Unsharp masking')
    name = 'unsharpmask'

    def execute_on(self, *args, **kwargs):
        """Sharpen via Pillow's unsharp mask filter."""
        super().execute_on(*args, **kwargs)

        return self.image.filter(
            ImageFilter.UnsharpMask(
                radius=self.radius, percent=self.percent,
                threshold=self.threshold
            )
        )


class TransformationZoom(BaseTransformation):
    arguments = ('percent',)
    label = _('Zoom')
    name = 'zoom'

    def execute_on(self, *args, **kwargs):
        """Scale the image by *percent* (100 is a no-op)."""
        super().execute_on(*args, **kwargs)

        if self.percent == 100:
            return self.image

        decimal_value = float(self.percent) / 100
        return self.image.resize(
            (
                int(self.image.size[0] * decimal_value),
                int(self.image.size[1] * decimal_value)
            ), Image.ANTIALIAS
        )


BaseTransformation.register(
    layer=layer_decorations, transformation=TransformationAssetPaste
)
BaseTransformation.register(
    layer=layer_decorations, transformation=TransformationAssetPastePercent
)
BaseTransformation.register(
    layer=layer_decorations, transformation=TransformationAssetWatermark
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationCrop
)
BaseTransformation.register(
    layer=layer_saved_transformations,
    transformation=TransformationDrawRectangle
)
BaseTransformation.register(
    layer=layer_saved_transformations,
    transformation=TransformationDrawRectanglePercent
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationFlip
)
BaseTransformation.register(
    layer=layer_saved_transformations,
    transformation=TransformationGaussianBlur
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationLineArt
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationMirror
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationResize
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationRotate
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationRotate90
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationRotate180
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationRotate270
)
BaseTransformation.register(
    layer=layer_saved_transformations,
    transformation=TransformationUnsharpMask
)
BaseTransformation.register(
    layer=layer_saved_transformations, transformation=TransformationZoom
)
+{"seq_id":"66579316","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\nimport sys\nimport urllib\nfrom bs4 import BeautifulSoup\nreload(sys)\nsys.setdefaultencoding('utf-8')\ndef get_html(url): #通过url获取网页内容\n result = urllib.urlopen(url)\n return result.read()\n # save_file(result.read(), 'thefile.txt')\ndef get_movie_all(html): #通过soup提取到每个电影的全部信息,以list返回\n soup = BeautifulSoup(html,\"html.parser\")\n movie_1 = soup.find_all('ul', class_=\"content-meta info\")\n movie_2=soup.find_all('title')\n movie_3=soup.find_all('div', class_=\"castSection \")\n movie_4=soup.find_all('div', id=\"scoreStats\" ,class_=\"hidden-xs\")\n movie_5=soup.find_all('span', class_=\"meter-value superPageFontColor\")\n movie_str=str(movie_1[0])+str(movie_2[0])+str(movie_3[0])+str(movie_4[0])+str(movie_5[0])\n movie=[movie_str]\n return movie\ndef get_movie_one(movie):\n result = [] # 用于存储提取出来的电影信息\n soup_all = BeautifulSoup(str(movie),\"html.parser\")\n title = soup_all.find_all('title')\n soup_title = BeautifulSoup(str(title[0]),\"html.parser\")\n for line in soup_title.stripped_strings: # 对获取到的里的内容进行提取\n result.append(line)\n result_str=\" | Fresh:\"\n\n fresh=soup_all.find_all('span', class_=\"meter-value superPageFontColor\")\n soup_fresh=BeautifulSoup(str(fresh[0]),\"html.parser\")\n for line in soup_fresh.stripped_strings:\n result_str=result_str+line\n\n rating=soup_all.find_all('div', class_=\"superPageFontColor\")\n soup_rating=BeautifulSoup(str(rating[0]),\"html.parser\")\n for line in soup_rating.stripped_strings:\n result_str=result_str+line\n result_str=result_str+\" | Actor:\"\n\n actor=soup_all.find_all('a', class_=\"unstyled articleLink\")\n for it_actor in actor:\n soup_actor = BeautifulSoup(str(it_actor),\"html.parser\")\n for line in soup_actor.stripped_strings:\n result_str = result_str + line + \" \"\n\n info=soup_all.find_all('li' ,class_=\"meta-row clearfix\")\n for it_info in info:\n soup_info=BeautifulSoup(str(it_info),\"html.parser\")\n for line in 
soup_info.stripped_strings:\n result_str=result_str+line+\" \"\n\n\n result.append(result_str)\n\n\n\n return result #返回获取到的结果\ndef save_file(text, filename): #保存网页到文件\n f= open(filename,'ab')\n f.write(bytes(text))\n f.close()\ndef read_file(filename): #读取文件\n f = open(filename,'r')\n text = f.read()\n f.close()\n return text\ndef work():\n try:\n f = open('RottenTomatoes_by_TV.txt', 'r')\n\n name = f.read()\n finally:\n if f:\n f.close()\n w = open('RottenTomatoes_by_TV.txt', 'w')\n w.truncate()\n w.close()\n url = 'https://www.rottentomatoes.com/tv/'+name\n html = get_html(url)\n movie_list = get_movie_all(html)\n for movie in movie_list: # 将每一页中的每个电影信息放入函数中提取\n result = get_movie_one(movie)\n text = '' + 'TV:' + str(result[0]) + str(result[1]) + '\\n' + '\\t'\n save_file(text, 'RottenTomatoes_by_TV.txt')\n\n\nif __name__=='__main__':\n work()","sub_path":"Catcher/RottenTomatoes_by_TV.py","file_name":"RottenTomatoes_by_TV.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"266939758","text":"#!/usr/bin/env python3\n\n# import Python modules\nimport os , re, csv, operator\n\n# initialse dictionaries\nerrors = {}\nper_user = {}\n\n# Create a raw-strng REG-EX search pattern\n# this will create 3 groups:\n# Group 1 will catch ERROR or INFO: ([A-Z]*)\n# Group 2 will catch entire ERROR message: ([\\w\\S ']*)\n# Group 3 will cacth user name (escaping parentesis): \\(([\\Sa-z]*)\\)\nregex = r\"ticky: ([A-Z]*)([\\w\\S ']*)\\(([\\Sa-z]*)\\)\"\n\n# Open a log file to itearte through the file and add items to dictinaries:\nwith open(\"syslog.log\") as file:\n for log in file.readlines():\n result = re.search(regex, log)\n \"\"\" Add new user key vluse pair to per_user={} dictionary if\n error key does not exist in dictionary. \"\"\"\n if result != None and result.group(3) not in per_user:\n per_user.update({result.group(3):[0,0]})\n # Yes, this creates list as a value which will create a problem later. See: **** Problem *** \n else:\n NotImplemented\n # When ERROR messages are parsed\n if result != None and result.group(1) == \"ERROR\": \n per_user[result.group(3)][1] += 1\n \"\"\" Add new error key value pair to errors={} dictionary if\n error key does not exist in dictionary. \"\"\"\n if result.group(2) not in errors:\n errors.update({result.group(2):1})\n else:\n # When INFO messages are parsed\n errors[result.group(2)] += 1\n else:\n per_user[result.group(3)][0] += 1\n file.close()\n\n# Sorting dictionaries errors={} and per_user={}\n# The error dictionary should be sorted by the number of errors from most common to least common\n# The user dictionary should be sorted by username\n# NB! 
this chages data type from dict() to list()\nsorted_errors = sorted(errors.items(), key = operator.itemgetter(1), reverse=True)\nsorted_users = sorted(per_user.items())\n\n# **** Problem *** \n# Since dicionary's per_user={} value is a list() data type we need to change it to a tuple for CSV writability purposes\nremovelist_list=[]\nfor item in sorted_users:\n count_list = item[1]\n new_tuple = (item[0],count_list[0],count_list[1])\n removelist_list.append(new_tuple)\nsorted_users = removelist_list\n\n# Inserting column names as (\"Error\", \"Count\") at the zero index position of the sorted error={} list.\n# Inserting column names as (\"Username\", \"INFO\", \"ERROR\") at the zero index position of the sorted per_user={} list.\nsorted_errors.insert(0, (\"Error\", \"Count\")) \nsorted_users.insert(0, (\"Username\", \"INFO\", \"ERROR\")) \n\n# Testing column names ans sorting \n# print(sorted_errors)\n# print(sorted_users)\n\n# Storing lists in CSV files: sorted_errors into error_message.csv and sorted_users into user_statistics.csv\nwith open('error_message.csv', 'w') as errors_csv:\n writer = csv.writer(errors_csv)\n writer.writerows(sorted_errors)\n\nwith open('user_statistics.csv', 'w') as user_stats_csv:\n writer = csv.writer(user_stats_csv)\n writer.writerows(sorted_users)","sub_path":"Qwiklabs/Log Analysis Using Regular Expressions/ticky_check.py","file_name":"ticky_check.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"340449914","text":"#!/user/bin/env python\r\n# -*- coding:utf-8 -*-\r\n__author__ = \"Zhong Lu\"\r\n\r\n\r\nimport pika\r\nimport gevent\r\nimport uuid\r\nfrom gevent import monkey\r\nfrom conf import settings\r\nfrom core import logger\r\n\r\n\r\n# 默认情况下,把程序直接交给gevent处理,gevent是不知道程序做了IO操作,所以就要给程序打补丁,让gevent能识别出IO操作\r\n# 在程序开头声明下面的代码,作用是把当前程序的所有IO操作都做上标记\r\nmonkey.patch_all()\r\n\r\n\r\n# 获取执行系统shell命令日志的logger对象\r\nexecute_command_logger = logger.logger_function(\"execute_command\")\r\n# 获取查看命令执行结果日志的logger对象\r\nview_command_logger = logger.logger_function(\"view_command\")\r\n\r\n\r\ncommand_result_dict = {} # 承放命令结果的字典\r\n\r\n\r\nclass RpcClient(object):\r\n \"\"\"RPC客户端类\"\"\"\r\n def call(self, ip_address, command):\r\n \"\"\"\r\n 向rabbitmq server队列中发送数据的方法函数\r\n :param ip_address: 需要执行命令的主机ip地址\r\n :param command: 需要执行的命令\r\n :return:\r\n \"\"\"\"\"\r\n # 生成一个随机queue,并生成queue对象\r\n # 因为是广播所以就不指定queue的名字,不指定的话,rabbit会随机分配一个名字。这里也可以自己命令queue的名字\r\n # exclusive是排他的、唯一的,exclusive=True会在使用此queue的消费者断开后,自动将queue删除\r\n queue_obj = self.channel.queue_declare(exclusive=True)\r\n # 获取queue的名字\r\n self.callback_queue = queue_obj.method.queue\r\n print(\"随机生成的队列名为\", self.callback_queue)\r\n\r\n self.corr_id = str(uuid.uuid4()) # 通过随机数来生成UUID,并转换成字符串\r\n\r\n # 以给定的交换(exchange)、路由键(routing_key)和主体(body)发布到通道\r\n self.channel.basic_publish(exchange='',\r\n routing_key=ip_address, # 将消息发送到哪个queue中\r\n properties=pika.BasicProperties(\r\n\r\n # 本端主动告诉对端,将响应本端的结果发送到哪个队列中\r\n reply_to=self.callback_queue,\r\n\r\n # 将在本端生成的UUID发送给对端\r\n correlation_id=self.corr_id,\r\n ),\r\n\r\n body=command # 将命令发送出去\r\n )\r\n\r\n print(\"命令%s已发送到rabbitmq server队列中了\" % command)\r\n print(\"命令%s需要在%s机器上执行\" % (command, ip_address))\r\n\r\n self.get_response()\r\n\r\n # 将单个主机执行命令后的结果放到承放命令结果的字典中\r\n command_result_dict[self.random_key][ip_address] = self.response\r\n\r\n def get_response(self):\r\n \"\"\"\r\n 从rabbitmq server队列中接收消息的方法函数\r\n :return:\r\n \"\"\"\"\"\r\n 
self.response = None # self.response是承放接收到的消息内容,默认值设置为None\r\n\r\n # 声明接收消息\r\n self.channel.basic_consume(self.on_response, # 如果收到消息,就调用self.on_response方法函数来处理消息\r\n queue=self.callback_queue # 从哪个队列中接收消息\r\n )\r\n\r\n # 如果self.response的值为None就要一直接收消息\r\n while self.response is None:\r\n # 接收消息。不管有没有接收到消息,都不会被block。相当于非阻塞版的start_consuming()\r\n self.connection.process_data_events()\r\n\r\n def on_response(self, ch, method, props, body):\r\n \"\"\"\r\n 接收到消息后触发的回调函数\r\n :param ch: 通道(或频道)的内存对象\r\n :param method: 方法。method中包含了将信息发送给谁的一些信息,例如队列、交换(exchange)\r\n :param props: 属性\r\n :param body: 接收的消息\r\n :return:\r\n \"\"\"\"\"\r\n # 如果本端生成的UUID和对端发送给本端的UUID相同,就代表接收到的消息是正确的\r\n # 本端可以连续给对端发送多条消息,为了保证接收到的结果和发送的消息正确对应上,添加了UUID确认机制\r\n # props.correlation_id就是获取,对端发送给本端的UUID\r\n if self.corr_id == props.correlation_id:\r\n self.response = body # 将接收到的消息内容赋值给self.response\r\n\r\n ch.basic_ack(delivery_tag=method.delivery_tag) # 给rabbitmq server发送确认消息\r\n\r\n\r\nclass Client(RpcClient):\r\n \"\"\"客户端类,继承了RpcClient类\"\"\"\r\n def __init__(self):\r\n # 创建连接对象,并在构建时将连接参数对象传递到连接适配器\r\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=settings.ip_address))\r\n\r\n # 在连接上创建一个通道\r\n self.channel = self.connection.channel()\r\n\r\n self.interactive()\r\n\r\n def interactive(self):\r\n \"\"\"\r\n 和用户交互的方法函数\r\n :return:\r\n \"\"\"\"\"\r\n self._help()\r\n\r\n while True:\r\n\r\n input_command = input(\"输入命令\\n>>>\").strip()\r\n\r\n if len(input_command) == 0: # 用户输入的命令为空\r\n continue\r\n\r\n action = input_command.split()[0] # 获取用户操作类型,run、check_task、help、exit\r\n\r\n if hasattr(self, \"_%s\" % action):\r\n function = getattr(self, \"_%s\" % action)\r\n function(input_command)\r\n else:\r\n print(\"\\033[1;31m输入的内容有误,请重新输入\\033[0m\")\r\n self._help()\r\n\r\n def _run(self, input_command):\r\n \"\"\"\r\n 解析用户输入的命令,并调用远程机器执行命令的方法函数\r\n :param input_command: 用户输入的命令\r\n :return:\r\n \"\"\"\"\"\r\n if input_command.count(\"--hosts\") != 1:\r\n 
print(\"\\033[1;31m输入的命令有误,缺少'--hosts'选项,请重新输入\\033[0m\")\r\n return False\r\n elif len(input_command.split(\"\\\"\")) < 4:\r\n print(\"\\033[1;31m输入的命令有误,'run'或'--hosts'后面的参数没有用双引号引起来,请重新输入\\033[0m\")\r\n return False\r\n\r\n command = input_command.split(\"\\\"\")[1].strip() # 获取shell命令,类型为字符串\r\n host_list = input_command.split(\"\\\"\")[3].strip().split() # 获取需要执行shell命令的主机,类型为列表\r\n\r\n if len(command) < 1 or len(host_list) < 1:\r\n print(\"\\033[1;31m输入的命令有误,'run'或'--hosts'后面缺少参数,请重新输入\\033[0m\")\r\n return False\r\n\r\n while True:\r\n # 通过随机数来生成UUID,并转换成字符串\r\n # 生成的UUID用来做承放命令结果字典的key\r\n random_key = str(uuid.uuid4())\r\n\r\n if random_key not in command_result_dict: # 生成的UUID不在承放命令结果字典中\r\n self.random_key = random_key # 将生成的UUID封装到对象中\r\n # 在承放命令结果字典中再初始化一个字典,UUID为key(每一条命令对应唯一一个UUID),初始化的字典为value\r\n command_result_dict[self.random_key] = {}\r\n break\r\n\r\n gevent_list = [] # 承放所要开启协程的列表\r\n for ip_address in host_list:\r\n # 启动协程,并将所有要启动的协程放入列表中\r\n gevent_list.append(gevent.spawn(self.call, ip_address, command))\r\n\r\n gevent.joinall(gevent_list) # 等待所有协程执行完成\r\n\r\n print(\"命令 \\033[1;32m%s\\033[0m \\n\"\r\n \"task id \\033[1;32m%s\\033[0m\" % (command, self.random_key))\r\n\r\n # 保存到日志文件中\r\n execute_command_logger.info(\"对主机%s执行了%s命令,生成的id是%s\" % (host_list, command, self.random_key))\r\n\r\n return True\r\n\r\n def _check_task(self, input_command):\r\n \"\"\"\r\n 解析用户输入的命令,并查看命令执行结果的方法函数\r\n :param input_command: 用户输入的命令\r\n :return:\r\n \"\"\"\"\"\r\n command_list = input_command.split()\r\n if len(command_list) < 2:\r\n print(\"\\033[1;31m输入的命令有误,缺少id,请重新输入\\033[0m\")\r\n return False\r\n\r\n command_id = command_list[1]\r\n if command_id not in command_result_dict:\r\n print(\"\\033[1;31m输入的id不存在,请重新输入\\033[0m\")\r\n return False\r\n\r\n result_dict = command_result_dict[command_id]\r\n for items in result_dict:\r\n print(\"-----\\033[1;32m%s\\033[0m\\n\"\r\n \"%s\" % (items, result_dict[items].decode(encoding=\"utf-8\")))\r\n\r\n # 
保存到日志文件中\r\n view_command_logger.info(\"用户查看了id为%s所对应的命令结果\" % command_id)\r\n\r\n del command_result_dict[command_id] # 将指定的key从字典中删除\r\n\r\n def _help(self, *args):\r\n \"\"\"\r\n 使用帮助的方法函数\r\n :param args: 扩展参数\r\n :return:\r\n \"\"\"\"\"\r\n help_info = r\"\"\"\r\n\r\n 程序使用帮助信息:\r\n\r\n -- 在远程机器上执行系统shell命令\r\n 命令格式:run \"shell_command\" --hosts \"ip_address ...\"\r\n 注意,shell_command和ip_address必须要用双引号引起来\r\n 命令示例:\r\n run \"ipconfig\" --hosts \"192.168.0.23\":在192.168.0.23机器上执行ipconfig命令\r\n run \"ipconfig\" --hosts \"192.168.0.23 192.168.157.128\":在192.168.0.23和192.168.157.128两台机器上分别执行ipconfig命令\r\n\r\n -- 查看系统shell命令执行结果\r\n 查看id对应的命令在远程机器上执行的结果\r\n 命令格式:check_task id\r\n 命令示例:\r\n check_task 26106e37-8f0f-478d-b7b0-c0e598b72719\r\n\r\n -- 结束程序运行,退出整个程序\r\n 命令示例:\r\n exit\r\n\r\n -- 查看程序使用帮助信息\r\n 命令示例:\r\n help\r\n \"\"\"\r\n print(help_info)\r\n\r\n def _exit(self, *args):\r\n \"\"\"\r\n 退出程序的方法函数\r\n :param args: 扩展参数\r\n :return:\r\n \"\"\"\"\"\r\n self.connection.close() # 关闭连接\r\n exit(\"程序退出\")\r\n","sub_path":"day11/Host_management_based_on_RabbitMQ_RPC/client/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"59896891","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport json\nfrom Constant import constants\nfrom Models import PDFInfo\n\n\ndef find_events_link(past_events):\n events_link = []\n for p in past_events:\n events_link.append(p.find(\"a\")[\"href\"])\n return events_link\n\n\ndef find_pdf_info(pdfs, title, date, past_events_info):\n for pdf in pdfs:\n pdf_title = pdf.get_text()\n href = pdf[\"href\"]\n pdf_info = PDFInfo.PDFInfo(title, date, pdf_title, href)\n past_events_info.append(pdf_info.__dict__)\n return past_events_info\n\n\ndef find_past_events_info(events_link):\n past_events_info = [] # title, date, [{\"pdfTitle\": \"pdfLink\"}]\n\n for link in events_link:\n address = requests.get(link)\n soup_address = bs(address.content)\n title = soup_address.find(\"div\", attrs={\"class\": \"wd_title wd_event_title detail_header\"}).get_text()\n date = soup_address.find(\"div\", attrs={\"class\": \"item_date wd_event_sidebar_item wd_event_date\"}).get_text()\n pdfs = soup_address.select(\"div.wd_event_info a\")\n pdf_info_list = find_pdf_info(pdfs, title, date, past_events_info)\n # past_event_info = EventCompleteInfo.EventCompleteInfo(title, date, pdf_info_list)\n # past_events_info.append(pdf_info_list)\n return past_events_info\n\n\ndef find_past_events_info_for_fb(json_object):\n past_events_info = []\n\n for event in json_object[\"GetEventListResult\"]:\n attachments = event[\"Attachments\"]\n for attachment in attachments:\n pdf_info = PDFInfo.PDFInfo(event[\"Title\"], event[\"EndDate\"], attachment[\"Title\"], attachment[\"Url\"])\n past_events_info.append(pdf_info.__dict__)\n return past_events_info\n\n\ndef find_past_events_info_for_informa(json_object):\n past_events_info = []\n\n for event in json_object[\"files\"]:\n pdf_info = PDFInfo.PDFInfo(event[\"category\"], event[\"updated_at\"], event[\"title\"], event[\"url\"])\n past_events_info.append(pdf_info.__dict__)\n return past_events_info\n\n\ndef 
get_data_for_homedot(href):\n headers = {\n \"cookie\": \"ASP.NET_SessionId=54fdvkoft30df3b4v1ego1eq; _ga=GA1.2.187890047.1629194699; _gid=GA1.2.1071652028.1629365143; AWSALB=nYuj3j/TH85GACNKznQvFiSvE/t0lcIUG2BOmMLFVhCcUWndQn3G2MB31kWcd2UxReaQRUzsycMCkLSSMshGN3Uh7li2AZTuYzAYLHKWYJBMzUzrd1E5gL4Ymwbu; AWSALBCORS=nYuj3j/TH85GACNKznQvFiSvE/t0lcIUG2BOmMLFVhCcUWndQn3G2MB31kWcd2UxReaQRUzsycMCkLSSMshGN3Uh7li2AZTuYzAYLHKWYJBMzUzrd1E5gL4Ymwbu; _gat=1; _gat_INVDSitecore=1\",\n \"user-agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36\",\n }\n address = requests.get(href, headers=headers)\n data = bs(address.content)\n return data\n\n\ndef scrap_for_homedepot(href, parent):\n past_events_info = []\n data = get_data_for_homedot(href)\n if parent:\n temp = data.find(\"div\", attrs={\"class\": \"snapdown-container board-container clearfix past-container\"})\n else:\n temp = data\n events = temp.findChildren(\"div\", recursive=False)\n for event in events:\n date = event.find(\"span\", attrs={\"class\": \"event-title\"})\n event_title = event.find(\"span\", attrs={\"class\": \"event-date\"})\n if date is None or event_title is None:\n return past_events_info\n date = date.get_text()\n event_title = event_title.get_text()\n pdfs = event.select(\"div.snapdown-content.member-description.clearfix\")\n for pdf in pdfs:\n if pdf.find(\"a\")[\"href\"]:\n pdf_link = pdf.find(\"a\")[\"href\"]\n pdf_title = pdf.find(\"a\").get_text()\n pdf_info = PDFInfo.PDFInfo(date, event_title, pdf_title, pdf_link)\n past_events_info.append(pdf_info.__dict__)\n return past_events_info\n\n\ndef scrap(event_id, href):\n if event_id == \"id1\":\n return scrap_for_weyerhaeuser(href)\n elif event_id == \"id2\":\n return scrap_for_fb(href)\n elif event_id == \"id3\":\n event_data = []\n for link in constants.events_informa:\n event_info = scrap_for_informa(link)\n for event in event_info:\n event_data.append(event)\n return event_data\n elif 
event_id == \"id4\":\n past_events_info = []\n past_events = scrap_for_homedepot(href, True)\n for event in past_events:\n past_events_info.append(event)\n data = get_data_for_homedot(href)\n other_pages = data.find_all(\"li\", attrs={\"class\": \"next\"})\n for page in other_pages:\n link = page.find(\"a\")\n if link is None:\n continue\n link = link[\"href\"]\n res = \"https://ir.homedepot.com/\" + link[: 26] + \"async=1&\" + link[26:]\n past_events = scrap_for_homedepot(res, False)\n for event in past_events:\n past_events_info.append(event)\n return past_events_info\n\n\ndef scrap_for_weyerhaeuser(href):\n r = requests.get(href)\n soup = bs(r.content)\n pastEvents = soup.select(\"div.wd_event\")\n eventsLink = find_events_link(pastEvents)\n return find_past_events_info(eventsLink)\n\n\ndef scrap_for_fb(href):\n address = requests.get(href)\n return find_past_events_info_for_fb(address.json())\n\n\ndef scrap_for_informa(href):\n address = requests.get(href)\n s = address.text\n my_prefix = \"jQuery1830500746899123085_1629202578914(\"\n s = s[len(my_prefix):]\n s = s[:-1]\n s = json.loads(s)\n return find_past_events_info_for_informa(s)\n","sub_path":"Service/document_scrap.py","file_name":"document_scrap.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"10006786","text":"import numpy as np\n\n\nclass Feature:\n _keys = [\n 'doc_cos_1',\n 'doc_cos_2',\n 'sent_cos_1',\n 'sent_cos_2',\n 'sent_cos_3',\n ]\n\n @classmethod\n def size(cls):\n return len(cls._keys)\n\n def __init__(self):\n super().__setattr__('data', np.zeros((len(Feature._keys),), dtype=np.float64))\n\n def __getattr__(self, item):\n if item in Feature._keys:\n return self.data[Feature._keys.index(item)]\n else:\n raise AttributeError\n\n def __setattr__(self, key, value):\n if key in Feature._keys:\n self.data[Feature._keys.index(key)] = value\n else:\n raise AttributeError\n\n","sub_path":"feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"447624141","text":"\"\"\"\nAuthor: Travis Hammond\nVersion: 1_1_2020\n\"\"\"\n\n\nimport os\nimport datetime\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import model_from_json\n\ntry:\n from utils.neural_network import (\n Trainner, Predictor, dense, conv2d\n )\n from utils.util_funcs import load_directory_dataset, load_h5py\nexcept ImportError:\n from neural_network import (\n Trainner, Predictor, dense, conv2d\n )\n from util_funcs import load_directory_dataset, load_h5py\n\n\nclass GANTrainner(Trainner):\n \"\"\"Generative Adversarial Network Trainner is used for loading, saving,\n and training keras GAN models.\n \"\"\"\n\n def __init__(self, model, dis_model, train_data, file_loader=None,\n conditional=False, normal_distribution=False):\n \"\"\"Initializes train, validation, and test data.\n params:\n model: A compiled keras model, which is the generator\n (loss function does not matter)\n dis_model: A compiled keras model, which is the discriminator\n (loss function does not matter)\n train_data: A dictionary, numpy ndarray, string/path\n containg train data, or a list with x\n and y ndarrays (Ex. 
{'train_x': [...]})\n file_loader: A function for loading each file\n conditional: A boolean, which determines if the GAN is a\n conditional GAN and neededs y data\n normal_distribution: A boolean, which determines if the\n model should be trained with normal\n or uniform random values\n \"\"\"\n assert isinstance(train_data, (str, dict, np.ndarray, list)), (\n 'train_data must be a dictionary, a file/folder path, a ndarray, '\n 'or a list with two ndarrays'\n )\n self.model = model\n self.input_shape = self.model.layers[0].input_shape[0][1:]\n self.optimizer = model.optimizer\n self.dis_model = dis_model\n self.dis_optimizer = dis_model.optimizer\n self.metric = tf.keras.metrics.Mean(name='loss')\n self.dis_metric = tf.keras.metrics.Mean(name='dis_loss')\n self.train_data = train_data\n self.conditional = conditional\n self.normal_distribution = normal_distribution\n\n if (not isinstance(train_data, np.ndarray) and\n (self.conditional and not\n isinstance(train_data[0], np.ndarray))):\n if isinstance(train_data, str):\n if os.path.isdir(train_data):\n assert file_loader is not None\n if self.conditional:\n data = load_directory_dataset(\n train_data, file_loader\n )\n train_data = [data['train_x'], data['train_y']]\n else:\n train_data = load_directory_dataset(\n train_data, file_loader\n )['train_x']\n else:\n assert train_data.split('.')[1] == 'h5'\n train_data = load_h5py(train_data)\n if isinstance(train_data, dict):\n if 'train_x' in train_data:\n if self.conditional:\n self.train_data = [train_data['train_x'],\n train_data['train_y']]\n else:\n self.train_data = train_data['train_x']\n else:\n raise Exception('There must be a train dataset')\n else:\n raise ValueError('Invalid train_data')\n if self.conditional:\n self.train_data[0] = self.train_data[0].astype(\n tf.keras.backend.floatx()\n )\n self.train_data[1] = self.train_data[1].astype(\n tf.keras.backend.floatx()\n )\n else:\n self.train_data = self.train_data.astype(\n tf.keras.backend.floatx()\n 
)\n\n @tf.function\n def _train_step(self, x):\n \"\"\"Trains the GAN 1 epoch.\n params:\n x: A Tensor\n y: A Tensor\n \"\"\"\n if self.conditional:\n length = x[0].shape[0]\n else:\n length = x.shape[0]\n if self.normal_distribution:\n inputs = tf.random.normal([length,\n *self.input_shape])\n else:\n inputs = tf.random.uniform([length,\n *self.input_shape])\n if self.conditional:\n inputs = [inputs, x[1]]\n with tf.GradientTape() as tape, tf.GradientTape() as dis_tape:\n preds = self.model(inputs, training=True)\n if len(self.model.losses) > 0:\n reg_loss = tf.math.add_n(self.model.losses)\n else:\n reg_loss = 0\n if self.conditional:\n preds = [preds, x[1]]\n dis_preds = self.dis_model(preds, training=True)\n dis_real_preds = self.dis_model(x, training=True)\n if len(self.dis_model.losses) > 0:\n dis_reg_loss = tf.math.add_n(self.dis_model.losses)\n else:\n dis_reg_loss = 0\n loss = tf.nn.sigmoid_cross_entropy_with_logits(\n tf.ones_like(dis_preds), dis_preds\n ) + reg_loss\n dis_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n tf.zeros_like(dis_preds), dis_preds\n )\n dis_real_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n tf.ones_like(dis_real_preds), dis_real_preds\n )\n total_dis_loss = dis_loss + dis_real_loss + dis_reg_loss\n grads = tape.gradient(loss, self.model.trainable_variables)\n dis_grads = dis_tape.gradient(total_dis_loss,\n self.dis_model.trainable_variables)\n\n self.optimizer.apply_gradients(\n zip(grads, self.model.trainable_variables)\n )\n self.dis_optimizer.apply_gradients(\n zip(dis_grads, self.dis_model.trainable_variables)\n )\n\n self.metric(loss)\n self.dis_metric(total_dis_loss)\n\n def train(self, epochs, batch_size=None, verbose=True):\n \"\"\"Trains the keras model.\n params:\n epochs: An integer, which is the number of complete\n iterations to train\n batch_size: An integer, which is the number of samples\n per graident update\n verbose: A boolean, which determines the verbositiy level\n \"\"\"\n\n if self.conditional:\n 
length = self.train_data[0].shape[0]\n batches = tf.data.Dataset.from_tensor_slices(\n (self.train_data[0],\n self.train_data[1])\n ).shuffle(length).batch(batch_size)\n else:\n length = self.train_data.shape[0]\n batches = tf.data.Dataset.from_tensor_slices(\n self.train_data\n ).shuffle(length).batch(batch_size)\n for epoch in range(1, epochs + 1):\n if verbose:\n print(f'Epoch {epoch}/{epochs}')\n count = 0\n for batch in batches:\n self._train_step(batch)\n count += np.minimum(batch_size, length - count)\n print(f'{count}/{length}', end='\\r')\n if verbose:\n print(f'{count}/{length} - '\n f'loss: {self.metric.result()} - '\n f'dis_loss: {self.dis_metric.result()}')\n self.metric.reset_states()\n self.dis_metric.reset_states()\n\n def load(self, path, optimizer='sgd', dis_optimizer='sgd'):\n \"\"\"Loads a generator and discriminator model and weights from a file.\n (overrides the inital provided model)\n params:\n path: A string, which is the path to a folder\n containing model.json, weights.h5, and note.txt\n optimizer: A string or optimizer instance, which will be\n the optimizer for the loaded generator model\n dis_optimizer: A string or optimizer instance, which will be\n the optimizer for the loaded discriminator model\n \"\"\"\n with open(os.path.join(path, 'model.json'), 'r') as file:\n self.model = model_from_json(file.read())\n self.model.optimizer = optimizer\n with open(os.path.join(path, 'dis_model.json'), 'r') as file:\n self.dis_model = model_from_json(file.read())\n self.dis_model.optimizer = dis_optimizer\n self.model.load_weights(os.path.join(path, 'weights.h5'))\n self.dis_model.load_weights(os.path.join(path, 'dis_weights.h5'))\n with open(os.path.join(path, 'note.txt'), 'r') as file:\n print(file.read(), end='')\n\n def save(self, path, note=None):\n \"\"\"Saves the generator and discriminator model and weights to a file.\n params:\n path: A string, which is the path to create a folder in\n containing model.json, weights.h5, note.txt,\n 
dis_model.json, and dis_weights.h5\n return: A string, which is the given path + folder name\n \"\"\"\n time = datetime.datetime.now()\n path = os.path.join(path, time.strftime(r'%Y%m%d_%H%M%S_%f'))\n os.mkdir(path)\n self.model.save_weights(os.path.join(path, 'weights.h5'))\n self.dis_model.save_weights(os.path.join(path, 'dis_weights.h5'))\n with open(os.path.join(path, 'model.json'), 'w') as file:\n file.write(self.model.to_json())\n with open(os.path.join(path, 'dis_model.json'), 'w') as file:\n file.write(self.dis_model.to_json())\n with open(os.path.join(path, 'note.txt'), 'w') as file:\n if note is None:\n self.model.summary(print_fn=lambda line: file.write(line+'\\n'))\n else:\n file.write(note)\n return path\n\n\nclass GANPredictor(Predictor):\n \"\"\"Generative Adversarial Network Predictor is used for\n loading and predicting keras GAN models.\n \"\"\"\n\n def predict(self, x, y=None):\n \"\"\"Predicts on a single sample.\n params:\n x: A single model input\n y: A single model conditional input\n return: A result from the model output\n \"\"\"\n if y is None:\n return self.model.predict(np.expand_dims(x, axis=0))[0]\n return self.model.predict([np.expand_dims(x, axis=0),\n np.expand_dims(y, axis=0)])[0]\n\n def predict_all(self, x, y=None, batch_size=None):\n \"\"\"Predicts on many samples.\n params:\n x: A ndarray of model inputs\n y: A ndarray of model conditional inputs\n return: A result from the model output\n \"\"\"\n if y is None:\n return self.model.predict(x, batch_size=batch_size)\n return self.model.predict([x, y], batch_size=batch_size)\n\n def random_normal_predict(self, y=None):\n \"\"\"Predicts an output with a random normal distribution.\n params:\n y: A single model conditional input\n return: A result from the model output\n \"\"\"\n input_shape = self.model.layers[0].input_shape[0][1:]\n normal = tf.random.normal([1, *input_shape])\n if y is None:\n return self.model.predict(normal)[0]\n return self.model.predict([normal,\n 
np.expand_dims(y, axis=0)])[0]\n\n def random_uniform_predict(self, y=None):\n \"\"\"Predicts an output with a random uniform distribution.\n params:\n y: A single model conditional input\n return: A result from the model output\n \"\"\"\n input_shape = self.model.layers[0].input_shape[0][1:]\n uniform = tf.random.uniform([1, *input_shape])\n if y is None:\n return self.model.predict(uniform)[0]\n return self.model.predict([uniform,\n np.expand_dims(y, axis=0)])[0]\n\n\nif __name__ == '__main__':\n import image as img\n from time import sleep\n\n training = False\n conditional = True\n path = 'trained_conditional' if conditional else 'trained'\n\n (tx, ty), _ = keras.datasets.fashion_mnist.load_data()\n tx = np.expand_dims((tx - 127.5) / 127.5, axis=-1)\n if conditional:\n labels = ['T-shirt', 'Trouser', 'Pullover', 'Dress',\n 'Coat', 'Sandal', 'Shirt', 'Sneaker',\n 'Bag', 'Ankle boot']\n ty = np.identity(len(labels))[ty]\n tx = [tx, ty]\n\n if training:\n if conditional:\n # Generator Model\n inputs = keras.layers.Input(shape=(100))\n x1 = dense(512)(inputs)\n cond_inputs = keras.layers.Input(shape=(len(labels)))\n x2 = dense(512)(cond_inputs)\n x = keras.layers.Concatenate()([x1, x2])\n x = dense(7*7*32)(x)\n x = keras.layers.Reshape((7, 7, 32))(x)\n x = conv2d(128, 3, strides=1)(x)\n x = conv2d(64, 3, strides=2, transpose=True)(x)\n outputs = conv2d(1, 3, strides=2, \n activation='tanh', batch_norm=False,\n transpose=True)(x)\n model = keras.Model(inputs=[inputs, cond_inputs],\n outputs=outputs)\n model.summary()\n optimizer = tf.keras.optimizers.Adam(.0002, .5)\n model.optimizer = optimizer\n\n # Discriminator Model\n inputs = keras.layers.Input(shape=(28, 28, 1))\n cond_inputs = keras.layers.Input(shape=(len(labels)))\n x = conv2d(64, 3, strides=2, activation=None,\n batch_norm=False)(inputs)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(128, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(256, 3, strides=2, 
activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(512, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = keras.layers.Flatten()(x)\n x2 = dense(1024, activation=None)(cond_inputs)\n x2 = keras.layers.LeakyReLU(alpha=0.2)(x2)\n x = keras.layers.Concatenate()([x, x2])\n x = dense(1024, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n outputs = dense(1, activation=None, batch_norm=False)(x)\n dis_model = keras.Model(inputs=[inputs, cond_inputs],\n outputs=outputs)\n dis_model.summary()\n dis_optimizer = tf.keras.optimizers.Adam(.0002, .5)\n dis_model.optimizer = dis_optimizer\n else:\n # Generator Model\n inputs = keras.layers.Input(shape=(100))\n x = dense(7*7*32)(inputs)\n x = keras.layers.Reshape((7, 7, 32))(x)\n x = conv2d(128, 3, strides=1)(x)\n x = conv2d(64, 3, strides=2, transpose=True)(x)\n outputs = conv2d(1, 3, strides=2,\n activation='tanh', batch_norm=False,\n transpose=True)(x)\n model = keras.Model(inputs=inputs, outputs=outputs)\n model.summary()\n optimizer = tf.keras.optimizers.Adam(.0002, .5)\n # model.compile(optimizer=optimizer, loss='mse')\n model.optimizer = optimizer\n\n # Discriminator Model\n inputs = keras.layers.Input(shape=(28, 28, 1))\n x = conv2d(64, 3, strides=2, activation=None,\n batch_norm=False)(inputs)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(128, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(256, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = conv2d(512, 3, strides=2, activation=None)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = keras.layers.Flatten()(x)\n outputs = dense(1, activation=None, batch_norm=False)(x)\n dis_model = keras.Model(inputs=inputs, outputs=outputs)\n dis_model.summary()\n dis_optimizer = tf.keras.optimizers.Adam(.0002, .5)\n dis_model.optimizer = dis_optimizer\n\n gant = GANTrainner(model, dis_model, tx,\n conditional=conditional)\n if 
path is not None:\n gant.load(path)\n gant.train(50, 512)\n path = gant.save('')\n gant.load(path, optimizer=optimizer, dis_optimizer=dis_optimizer)\n\n del gant\n\n ganp = GANPredictor(path)\n\n ws = img.Windows()\n w = ws.add('Image')\n ws.start()\n\n while True:\n if conditional:\n identity = np.identity(len(labels))\n for ndx in range(len(labels)):\n preds = ganp.random_uniform_predict(identity[ndx])\n preds = np.squeeze(preds * 127.5 + 127.5).astype(np.uint8)\n ws.set(w, preds)\n print(labels[ndx])\n sleep(2)\n else:\n preds = ganp.random_uniform_predict() * 127.5 + 127.5\n preds = np.squeeze(preds).astype(np.uint8)\n ws.set(w, preds)\n sleep(1)\n\n ws.stop()\n","sub_path":"video12/gan_network.py","file_name":"gan_network.py","file_ext":"py","file_size_in_byte":17606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"539165967","text":"import argparse, os, random\n\n\nclass Board:\n def __init__(self, n):\n self.size = n\n self.cells = [[' ' for _ in range(n)] for _ in range(n)]\n self.winning_lines = []\n \n # add winning rows\n for i in range(n):\n self.winning_lines.append([])\n for j in range(n):\n self.winning_lines[-1].append((i, j))\n \n # add winning columns\n for i in range(n):\n self.winning_lines.append([])\n for j in range(n):\n self.winning_lines[-1].append((j, i))\n \n # add winning diagonal lines\n self.winning_lines.append([])\n for i in range(n):\n self.winning_lines[-1].append((i, i))\n self.winning_lines.append([])\n for i in range(n):\n self.winning_lines[-1].append(((n-1)-i, i))\n\n def __str__(self):\n result = ''\n for i in range(self.size):\n # print top part of row\n line = ' '\n for _ in range(self.size):\n line += '+---'\n line += '+\\n'\n result += line\n\n # print middle section of row\n line = str(self.size - 1 - i) + ' '\n for j in range(self.size):\n line += '| ' + self.cells[i][j] + ' '\n line += '|\\n'\n result += line\n\n # print bottom line of bottom row with labels\n line = ' '\n labels = ' '\n for i in range(self.size):\n line += '+---'\n labels += str(i) + ' '\n line += '+\\n'\n result += line + labels\n\n return result\n\n def get_open_cells(self):\n open_cells = []\n for i in range(len(self.cells)):\n for j in range(len(self.cells[i])):\n if self.cells[i][j] == ' ':\n open_cells.append((i, j))\n return open_cells\n\n def compute_danger(self):\n # for simplicity, we only compute player x danger\n # = sum of (1/2)^(# of open cells remaining\n # in line that contains no o's)\n danger = 0\n for line in self.winning_lines:\n if 'O' not in line:\n danger += (1/2)**(len(line) - line.count('X'))\n return danger\n\n def get_optimal_move(self):\n # maximize the value we subtract from the \"danger\" function\n open_cells = self.get_open_cells()\n best_move = open_cells[0]\n best_move_val = 0\n\n for pos in open_cells:\n val = 0\n for 
line in self.winning_lines:\n if pos in line and 'O' not in line:\n val += (1/2)**(len(line) - line.count('X'))\n if val > best_move_val:\n best_move = pos\n best_move_val = val\n\n return best_move\n\n def add_move(self, position, symbol):\n # in all lines, replace position with symbol\n for i in range(len(self.winning_lines)):\n for j in range(len(self.winning_lines[i])):\n if self.winning_lines[i][j] == position:\n self.winning_lines[i][j] = symbol\n\n # remove winning lines that contain both symbols\n temp = list(filter(lambda lst: not('X' in lst and 'O' in lst), self.winning_lines))\n self.winning_lines = temp\n\n # add move to board\n self.cells[position[0]][position[1]] = symbol\n\n def check_for_win(self):\n if len(self.winning_lines) == 0:\n return True, 'No one'\n\n for line in self.winning_lines:\n if line.count('X') == len(line):\n return True, 'Player X'\n elif line.count('O') == len(line):\n return True, 'Player O'\n return False, None\n\n\nclass Player:\n def __init__(self, symbol, board):\n self.symbol = symbol\n self.board = board\n\n def add_move(self, position):\n self.board.add_move(position, self.symbol)\n\n def choose_move(self):\n while True:\n try:\n pos = input('Please enter x,y coordinates: ').split(',')\n pos = convert_to_working(tuple(int(c.strip(\"()[] \")) for c in pos), self.board)\n if pos in self.board.get_open_cells():\n self.board.add_move(pos, self.symbol)\n break\n else:\n print('Cell not available.')\n except:\n print('Cannot read input. 
Please try again.')\n\n def random_move(self):\n open_cells = self.board.get_open_cells()\n random_index = random.randint(0, len(open_cells)-1)\n position = open_cells[random_index]\n self.board.add_move(position, self.symbol)\n\n\ndef convert_to_display(coordinates, board):\n x = coordinates[1]\n y = (board.size - 1) - coordinates[0]\n return (x, y)\n\n\ndef convert_to_working(coordinates, board):\n x = (board.size - 1) - coordinates[1]\n y = coordinates[0]\n return (x, y)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('size', type=int, help='size of board (height/width')\n args = parser.parse_args()\n size = args.size\n\n board = Board(size)\n player_x = Player('X', board)\n player_o = Player('O', board)\n game_over = False\n winner = None\n\n while True:\n os.system('clear') # clear screen before printing updated grid\n print(board)\n print('Player X, choose an option:')\n opt = input('(1) Enter a move\\n(2) Random move\\n(x) Exit\\n').strip()\n if opt == '1':\n player_x.choose_move()\n elif opt == '2':\n player_x.random_move()\n elif opt == 'x':\n exit(0)\n\n game_over, winner = board.check_for_win()\n if game_over:\n break\n\n potential = board.compute_danger()\n optimal_move = board.get_optimal_move()\n \n os.system('clear') # clear screen before printing updated grid\n print(board)\n print('Erdos-Selfridge potential: ' + str(potential))\n print('Optimal move: ' + str(convert_to_display(optimal_move, board)) + '\\n')\n\n print('Player O, choose an option:')\n opt = input('(1) Enter a move\\n(2) Random move\\n(3) Potential strategy\\n(x) Exit\\n').strip()\n if opt == '1':\n player_o.choose_move()\n elif opt == '2':\n player_o.random_move()\n elif opt == '3':\n player_o.add_move(optimal_move)\n elif opt == 'x':\n exit(0)\n\n game_over, winner = board.check_for_win()\n if game_over:\n break\n\n os.system('clear') # clear screen before printing updated grid\n print(board)\n print('Game over. 
' + str(winner) + ' wins!')\n\n","sub_path":"tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"140133466","text":"from itertools import count\nimport json\nimport os\nimport struct\nimport sys\n\ndef main(src_dir, build_dir, out_file):\n index = []\n paths = []\n hidden_deps = set()\n\n src = lambda path: os.path.join(src_dir, path)\n build = lambda path: os.path.join(build_dir, path)\n\n def add(ty, name, path, hide_dep=False):\n size = os.stat(path).st_size\n\n index.append({\n 'name': name,\n 'length': size,\n 'type': ty,\n })\n paths.append(path)\n\n if hide_dep:\n hidden_deps.add(path)\n\n add('image', 'tiles', build('tiles.png'))\n add('image', 'fonts', build('fonts.png'))\n add('image', 'items_img', build('items.png'))\n add('image', 'ui_atlas', build('ui_atlas.png'))\n\n add('binary', 'client_data', build('client_data.bin'))\n\n add('text', 'sprite.vert', src('assets/shaders/sprite.vert'))\n add('text', 'sprite.frag', src('assets/shaders/sprite.frag'))\n add('text', 'app_pony.frag', src('assets/shaders/app_pony.frag'))\n add('text', 'cursor.frag', src('assets/shaders/cursor.frag'))\n add('text', 'cursor.vert', src('assets/shaders/cursor.vert'))\n\n add('text', 'blit_post.frag', src('assets/shaders/blit_post.frag'))\n add('text', 'blit_output.frag', src('assets/shaders/blit_output.frag'))\n add('text', 'blend_layers.frag', src('assets/shaders/blend_layers.frag'))\n add('text', 'blit_fullscreen.vert', src('assets/shaders/blit_fullscreen.vert'))\n\n add('text', 'terrain2.frag', src('assets/shaders/terrain2.frag'))\n add('text', 'terrain2.vert', src('assets/shaders/terrain2.vert'))\n add('text', 'structure2.frag', src('assets/shaders/structure2.frag'))\n add('text', 'structure2.vert', src('assets/shaders/structure2.vert'))\n add('text', 'light2.frag', src('assets/shaders/light2.frag'))\n add('text', 'light2.vert', src('assets/shaders/light2.vert'))\n add('text', 'entity2.frag', src('assets/shaders/entity2.frag'))\n add('text', 'entity2.vert', src('assets/shaders/entity2.vert'))\n add('text', 'slicing.inc', 
src('assets/shaders/slicing.inc'))\n\n add('text', 'debug_graph.vert', src('assets/shaders/debug_graph.vert'))\n add('text', 'debug_graph.frag', src('assets/shaders/debug_graph.frag'))\n\n add('text', 'ui_blit.vert', src('assets/shaders/ui_blit.vert'))\n add('text', 'ui_blit.frag', src('assets/shaders/ui_blit.frag'))\n add('text', 'ui_blit_tiled.vert', src('assets/shaders/ui_blit_tiled.vert'))\n add('text', 'ui_blit_tiled.frag', src('assets/shaders/ui_blit_tiled.frag'))\n add('text', 'ui_blit2.vert', src('assets/shaders/ui_blit2.vert'))\n add('text', 'ui_blit2.frag', src('assets/shaders/ui_blit2.frag'))\n\n\n with open(build('structures_list.json')) as f:\n structures_list = json.load(f)\n for s in structures_list:\n add('image', s, build(s + '.png'))\n\n with open(build('sprites_list.json')) as f:\n sprites_list = json.load(f)\n for f in sprites_list:\n dest, _ = os.path.splitext(os.path.basename(f))\n add('image', dest, build(os.path.join('sprites', f)))\n\n\n # Generate the pack containing the files added above.\n\n offset = 0\n for entry in index:\n entry['offset'] = offset\n offset += entry['length']\n\n\n index_str = json.dumps(index)\n index_len = len(index_str.encode())\n\n with open(out_file, 'wb') as f:\n f.write(struct.pack(''+temp_id+url)\r\n\r\ndef ajax(request):\r\n if request.method == \"POST\":\r\n print(request.POST['name'],request.POST['id'])\r\n name = request.POST['name']\r\n av_id = request.POST['id']\r\n # response_data = {'name':name,'av_id':av_id}\r\n # return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\r\n \r\n url = 'https://www.dmm.co.jp/digital/videoa/-/list/narrow/=/article=actress/id='+av_id+'/limit=30/n1=DgRJTglEBQ4GpoD6,YyI,qs_/'\r\n # print(url)\r\n html = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(html, 'html.parser')\r\n src = soup.find_all('p',class_=\"tmb\")\r\n \r\n\r\n pic_url = []\r\n for j in src:\r\n temp = j.find_all('img')[0].get_attribute_list('src')[0]\r\n 
pic_url.append(temp)\r\n \r\n context = {\r\n 'name' : name,\r\n 'pic_url' : pic_url,\r\n }\r\n return HttpResponse(json.dumps(context), content_type=\"application/json\")","sub_path":"faceApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"394891869","text":"\n\nfrom django.urls import path\nfrom . import views\nfrom django.conf.urls import url\n\nurlpatterns = [\n path('Home/',views.homepage,name='homepage'),\n path('write-story/',views.startStory,name='startStory'),\n path('submit-story/',views.submitStory,name='submitStory'),\n path('imageupload/',views.imageupload,name='imageupload'),\n path('story-list/',views.listOfStories,name='listOfStories'),\n url(r'^login/$', views.LoginFormView.as_view(), name='login'),\n url(r'^register/$', views.UserFormView.as_view(), name='register'),\n url(r'^logout/$', views.logout_user, name='logout_user'),\n path('update_rating/',views.update_rating,name='update_rating'),\n path('submit-review/',views.submit_review,name='submit-review'),\n url(r'^(?P[0-9a-zA-Z\\s]+)/(?P[0-9a-zA-Z\\s]+)/read-story/$', views.readStory,name='readStory'),\n]\n\n\n","sub_path":"storiesandstuff/stories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"532132996","text":"import MySQLdb\nimport os\nimport string\n\ndb = MySQLdb.connect (host=\"localhost\",\n user=\"root\",\n passwd=\"Abc@1234\",\n db=\"email_verify\",\n local_infile = 1) #Grants permission to write to db from an input file. Without this you get sql Error: (1148, 'The used command is not allowed with this MySQL version')\n\nprint(\"\\nConnection to DB established\\n\")\n\n#The statement 'IGNORE 1 LINES' below makes the Python script ignore first line on csv file\n#You can execute the sql below on the mysql bash to test if it works\nsqlLoadData = \"\"\"load data local infile 'series_mbl_vcon_circle.csv' into table Series_Mbl FIELDS TERMINATED BY ',' \n ENCLOSED BY '\"' LINES TERMINATED BY '\\n' IGNORE 1 LINES;\"\"\"\n\ntry:\n curs = db.cursor() \n curs.execute(sqlLoadData)\n db.commit() \n print(\"SQL execution complete\") \n resultSet = curs.fetchall() \nexcept: \n print(\"Error incurred: \") \n db.rollback()\n db.close()\nprint(\"Data loading complete.\\n\")","sub_path":"html-file/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"421162736","text":"\"\"\"\nHernandez-Andres, Lee and Romero (1999) Correlated Colour Temperature\n=====================================================================\n\nDefines the *Hernandez-Andres et al. (1999)* correlated colour temperature\n:math:`T_{cp}` computations objects:\n\n- :func:`colour.temperature.xy_to_CCT_Hernandez1999`: Correlated colour\n temperature :math:`T_{cp}` computation of given *CIE xy* chromaticity\n coordinates using *Hernandez-Andres, Lee and Romero (1999)* method.\n- :func:`colour.temperature.CCT_to_xy_Hernandez1999`: *CIE xy* chromaticity\n coordinates computation of given correlated colour temperature\n :math:`T_{cp}` using *Hernandez-Andres, Lee and Romero (1999)* method.\n\nReferences\n----------\n- :cite:`Hernandez-Andres1999a` : Hernández-Andrés, J., Lee, R. L., &\n Romero, J. (1999). Calculating correlated color temperatures across the\n entire gamut of daylight and skylight chromaticities. Applied Optics,\n 38(27), 5703. doi:10.1364/AO.38.005703\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\nfrom scipy.optimize import minimize\n\nfrom colour.colorimetry import CCS_ILLUMINANTS\nfrom colour.hints import (\n ArrayLike,\n Dict,\n FloatingOrArrayLike,\n FloatingOrNDArray,\n NDArray,\n Optional,\n)\nfrom colour.utilities import as_float_array, as_float, tsplit, usage_warning\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"colour-developers@colour-science.org\"\n__status__ = \"Production\"\n\n__all__ = [\n \"xy_to_CCT_Hernandez1999\",\n \"CCT_to_xy_Hernandez1999\",\n]\n\n\ndef xy_to_CCT_Hernandez1999(xy: ArrayLike) -> FloatingOrNDArray:\n \"\"\"\n Return the correlated colour temperature :math:`T_{cp}` from given\n *CIE xy* chromaticity coordinates using *Hernandez-Andres et al. 
(1999)*\n method.\n\n Parameters\n ----------\n xy\n *CIE xy* chromaticity coordinates.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n Correlated colour temperature :math:`T_{cp}`.\n\n References\n ----------\n :cite:`Hernandez-Andres1999a`\n\n Examples\n --------\n >>> xy = np.array([0.31270, 0.32900])\n >>> xy_to_CCT_Hernandez1999(xy) # doctest: +ELLIPSIS\n 6500.7420431...\n \"\"\"\n\n x, y = tsplit(xy)\n\n n = (x - 0.3366) / (y - 0.1735)\n CCT = (\n -949.86315\n + 6253.80338 * np.exp(-n / 0.92159)\n + 28.70599 * np.exp(-n / 0.20039)\n + 0.00004 * np.exp(-n / 0.07125)\n )\n\n n = np.where(CCT > 50000, (x - 0.3356) / (y - 0.1691), n)\n\n CCT = np.where(\n CCT > 50000,\n 36284.48953\n + 0.00228 * np.exp(-n / 0.07861)\n + 5.4535e-36 * np.exp(-n / 0.01543),\n CCT,\n )\n\n return as_float(CCT)\n\n\ndef CCT_to_xy_Hernandez1999(\n CCT: FloatingOrArrayLike, optimisation_kwargs: Optional[Dict] = None\n) -> NDArray:\n \"\"\"\n Return the *CIE xy* chromaticity coordinates from given correlated colour\n temperature :math:`T_{cp}` using *Hernandez-Andres et al. (1999)* method.\n\n Parameters\n ----------\n CCT\n Correlated colour temperature :math:`T_{cp}`.\n optimisation_kwargs\n Parameters for :func:`scipy.optimize.minimize` definition.\n\n Returns\n -------\n :class:`numpy.ndarray`\n *CIE xy* chromaticity coordinates.\n\n Warnings\n --------\n *Hernandez-Andres et al. (1999)* method for computing *CIE xy* chromaticity\n coordinates from given correlated colour temperature is not a bijective\n function and might produce unexpected results. It is given for consistency\n with other correlated colour temperature computation methods but should be\n avoided for practical applications. 
The current implementation relies on\n optimization using :func:`scipy.optimize.minimize` definition and thus has\n reduced precision and poor performance.\n\n References\n ----------\n :cite:`Hernandez-Andres1999a`\n\n Examples\n --------\n >>> CCT_to_xy_Hernandez1999(6500.7420431786531) # doctest: +ELLIPSIS\n array([ 0.3127..., 0.329...])\n \"\"\"\n\n usage_warning(\n '\"Hernandez-Andres et al. (1999)\" method for computing '\n '\"CIE xy\" chromaticity coordinates from given correlated '\n \"colour temperature is not a bijective function and and\"\n \"might produce unexpected results. It is given for \"\n \"consistency with other correlated colour temperature \"\n \"computation methods but should be avoided for practical \"\n \"applications.\"\n )\n\n CCT = as_float_array(CCT)\n shape = list(CCT.shape)\n CCT = np.atleast_1d(CCT.reshape([-1, 1]))\n\n def objective_function(\n xy: ArrayLike, CCT: FloatingOrArrayLike\n ) -> FloatingOrNDArray:\n \"\"\"Objective function.\"\"\"\n\n objective = np.linalg.norm(\n xy_to_CCT_Hernandez1999(xy) - as_float_array(CCT)\n )\n\n return as_float(objective)\n\n optimisation_settings = {\n \"method\": \"Nelder-Mead\",\n \"options\": {\n \"fatol\": 1e-10,\n },\n }\n if optimisation_kwargs is not None:\n optimisation_settings.update(optimisation_kwargs)\n\n xy = as_float_array(\n [\n minimize(\n objective_function,\n x0=CCS_ILLUMINANTS[\"CIE 1931 2 Degree Standard Observer\"][\n \"D65\"\n ],\n args=(CCT_i,),\n **optimisation_settings,\n ).x\n for CCT_i in as_float_array(CCT)\n ]\n )\n\n return xy.reshape(shape + [2])\n","sub_path":"colour/temperature/hernandez1999.py","file_name":"hernandez1999.py","file_ext":"py","file_size_in_byte":5610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"628362063","text":"import sys\r\nfrom pyspark.sql import *\r\nspark = SparkSession\\\r\n .builder\\\r\n .appName(\"customername\")\\\r\n .getOrCreate() # creating the spark session\r\n \r\nlines = spark.read.text(\"purchase\").rdd.map(lambda r: r[0])\r\nparts = lines.map(lambda l: l.split(\"\\t\"))\r\npurchase = parts.map(lambda p:\r\nRow(year=int(p[0]),cid=p[1],isbn=p[2],seller=p[3],price=int(p[4])))\r\npurchaseTable = spark.createDataFrame(purchase)\r\npurchaseTable.createOrReplaceTempView(\"purchase\") \r\n\r\nlines = spark.read.text(\"book\").rdd.map(lambda r: r[0])\r\nparts = lines.map(lambda l: l.split(\"\\t\"))\r\nbook = parts.map(lambda p: Row(isbn=p[0],name=p[1]))\r\nbookTable = spark.createDataFrame(book)\r\nbookTable.createOrReplaceTempView(\"book\")\r\n\r\nlines = spark.read.text(\"customer\").rdd.map(lambda r: r[0])\r\nparts = lines.map(lambda l: l.split(\"\\t\"))\r\ncustomer = parts.map(lambda p:\r\nRow(cid=p[0],name=p[1],age=int(p[2]),address=p[3],sex=p[4]))\r\ncustomerTable = spark.createDataFrame(customer)\r\ncustomerTable.createOrReplaceTempView(\"customer\")\r\n\r\ncustomername = spark.sql(\"select name from customer where cid IN (select distinct(purch.cid) as cid from purchase as purch INNER JOIN (select pur.cid as cid,pur.isbn as isbn from purchase as pur INNER JOIN (select cid from customer where name like '%Harry%') as harry ON pur.cid=harry.cid) as common ON purch.isbn=common.isbn and purch.cid != common.cid)\")\r\nNames = customername.rdd.map(lambda p: p.name).collect()\r\nf = open('customernames.txt','w') # Creating a ouput file named customernames in the current working directory\r\nfor name in Names:\r\n\tf.write(name+'\\n') # writing the names to the file\r\nf.close()\r\nspark.stop()\r\n","sub_path":"RelationalDataAnalysis.py","file_name":"RelationalDataAnalysis.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"479340455","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport re\nimport subprocess\nimport shutil\nfrom pathlib import Path\nfrom build import Builder\n\npackage_name = Builder.package_name_from_filename(__file__)\ndependencies = ()\n\n\ndef prepare(builder):\n archive_name = package_name + '-src.tgz'\n builder.extract(archive_name)\n return True\n\n\ndef build(builder):\n os.chdir('icu')\n os.chdir('source')\n\n \"\"\"\n List of relevant configure arguments:\n --enable-shared build shared libraries default=yes\n --enable-static build static libraries default=no\n --enable-extras build ICU extras default=yes\n --enable-layoutex build ICU's Paragraph Layout library default=yes.\n icu-le-hb must be installed via pkg-config. See http://harfbuzz.org\n\n --enable-tests build ICU tests default=yes\n --enable-samples build ICU samples default=yes\n\n --with-library-bits=bits specify how many bits to use for the library (32, 64, 64else32, nochange) default=nochange\n --with-data-packaging specify how to package ICU data. 
Possible values:\n files raw files (.res, etc)\n archive build a single icudtXX.dat file\n library shared library (.dll/.so/etc.)\n static static library (.a/.lib/etc.)\n auto build shared if possible (default)\n See http://userguide.icu-project.org/icudata for more info.\n \"\"\"\n common_configure_args = ['--disable-shared', '--enable-static', '--disable-extras', '--disable-layoutex',\n '--disable-tests', '--disable-samples',\n '--with-library-bits={}'.format(builder.target_platform_bits),\n '--with-data-packaging=static']\n if builder.toolset.startswith('msvc'):\n environment = builder.setup_env()\n # We need to add the /utf-8 flag to make cl treat source files UTF8 encoded.\n environment['CFLAGS'] = '{} /utf-8'.format(environment.get('CFLAGS', ''))\n environment['CXXFLAGS'] = '{} /utf-8'.format(environment.get('CXXFLAGS', ''))\n\n # Convert Windows path to Cygwin path.\n install_prefix = re.sub(r'(.):/(.*)', r'/cygdrive/\\1/\\2', builder.install_prefix.as_posix())\n\n configure_args = [(builder.cygwin / 'bin' / 'bash.exe').as_posix(), 'runConfigureICU', 'Cygwin/MSVC',\n '--prefix={}'.format(install_prefix)]\n configure_args.extend(common_configure_args)\n subprocess.check_call(configure_args, env=environment)\n\n # Replace linker parameter '-o' to '/out' in all Makefiles.\n for filename in Path('.').glob('**/Makefile'):\n print('* Patching compiler flags in file {}'.format(filename))\n with open(filename.as_posix(), 'r') as file:\n file_data = file.read()\n file_data = re.sub(r'(\\$\\(LINK\\.cc\\).*) -o (.*)', r'\\1 /OUT:\\2', file_data)\n with open(filename.as_posix(), 'w') as file:\n file.write(file_data)\n\n builder.make()\n builder.make(install=True)\n\n # The install target places DLL files in the lib folder, even though they actually belong in the bin folder.\n for filename in (builder.install_prefix / 'lib').glob('**/icu*64.dll'):\n shutil.move(filename, builder.install_prefix / 'bin' / filename.name)\n else:\n 
builder.configure(args=common_configure_args)\n builder.make(install=True)\n\n\ndef cleanup(builder):\n builder.remove_folder('icu')\n\n\nif __name__ == \"__main__\":\n print('You must not call this script directly.')\n sys.exit(1)\n","sub_path":"3rdparty/packages/icu4c-65_1.py","file_name":"icu4c-65_1.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"97354398","text":"import pandas as pd\nimport numpy as np\nimport warnings\n\nfrom ..utils.validation import isfloat\nfrom ..utils.utilities import list_del_indices\n__all__ = [\n 'missing_values',\n]\n\ndef cut_df(col, df, paste_col=False, on_right=False):\n \"\"\" \n To cut one or more columns from a dataframe as seprate dataframe.\n paste_col sets optional columns for the resulted dataframe. Both col and \n paste_col must be lists.\n on_right: select as many columns as length of 'col' from right side of \n dataframe. \n Notice: The order must had been considered in the 'paste_col'\n \"\"\" \n if on_right:\n n = len(col)\n df_paste = df.iloc[:,-n:]\n if paste_col:\n df_paste.columns = paste_col\n df = df.iloc[:,:-n]\n else:\n df_paste = df[col]\n if paste_col:\n df_paste.columns = paste_col\n df.drop(col,axis=1, inplace=True)\n return df, df_paste\n \ndef _check_object_col(df, name):\n \"\"\"\n Goals: \n - check if columns with type 'object' don't have elements that can be \n converted to numeric values.\n - remove columns with all non numeric elements.\n \"\"\"\n object_cols = [df.dtypes.index[i] for i, typ in enumerate(df.dtypes) if typ == \"object\"]\n for col in object_cols:\n for i, value in enumerate(df[col]):\n if isfloat(value):\n raise ValueError(\"column '%s' in '%s' includes both string and float values.\" %(str(col),name))\n # drop object columns\n if len(object_cols)>0:\n df = df.drop(object_cols,1)\n return df\n \nclass missing_values(object):\n \"\"\" Handle all the missing values.\n \n Parameters\n ----------\n strategy: string, optional (default=\"ignore_row\")\n \n list of strategies:\n - interpolate: interpolate based on sorted target values\n - zero: set to the zero\n - ignore_row: remove the entire row in data and target\n - ignore_column: remove the entire column in data and target\n\n string_as_null: boolean, optional (default=True)\n If True non numeric elements are considered to be null in computations.\n \n missing_values: 
list, optional (default=None)\n where you define specific formats of missing values. It is a list of string, float or integer values.\n\n inf_as_null: boolean, optional (default=True)\n If True inf and -inf elements are considered to be null in computations.\n\n Returns\n -------\n data frame\n mask: Only if strategy = ignore_row. Mask is a binary pandas series which stores the information regarding removed\n \"\"\"\n def __init__(self, strategy=\"ignore_row\", string_as_null = True,\n inf_as_null = True, missing_values = None):\n self.strategy = strategy\n self.string_as_null = string_as_null\n self.inf_as_null = inf_as_null\n self.missing_values = missing_values\n \n def fit_transform(self, df):\n \"\"\"\n use fit_transform for:\n - replace missing values with nan.\n - drop columns with all nan values.\n - fill nan values with the specified strategy.\n\n :param:\n df: pandas data frame\n :attribute:\n mask: binary pandas series, only if strategy = 'ignore_row' or 'ignore_column'\n mask is a binary vector whose length is the number of rows/indices in the df. The index of each bit shows\n if the row/column in the same position has been removed or not.\n The goal is keeping track of removed rows/columns to change the target data frame or other input data frames based\n on that. 
The mask can later be used in the transform method to change other data frames in the same way.\n \"\"\"\n if self.inf_as_null == True:\n df.replace([np.inf, -np.inf,'inf','-inf'], np.nan, True)\n if self.string_as_null == True:\n df = df.convert_objects(convert_numeric=True)\n if isinstance(self.missing_values, (list, tuple)):\n for pattern in self.missing_values:\n df.replace(pattern, np.nan, True)\n\n df = _check_object_col(df, 'df')\n # drop null columns\n df.dropna(axis=1, how='all', inplace=True)\n\n if self.strategy == 'zero':\n for col in df.columns:\n df[col].fillna(value=0,inplace=True)\n return df\n elif self.strategy == 'ignore_row':\n dfi = df.index\n df.dropna(axis=0, how='any', inplace=True)\n mask=[i in df.index for i in dfi]\n self.mask = pd.Series(mask, index=dfi)\n # self.mask = pd.notnull(df).all(1)\n # df = df[self.mask]\n return df\n elif self.strategy == 'ignore_column':\n dfc = df.columns\n df.dropna(axis=1, how='any', inplace=True)\n mask=[i in df.columns for i in dfc]\n self.mask = pd.Series(mask, index=dfc)\n # self.mask = pd.notnull(df).all(0)\n # df = df.T[self.mask].T\n return df\n elif self.strategy == 'interpolate':\n df = df.interpolate()\n df.fillna(method='ffill',axis=1, inplace=True) # because of nan in the first and last element of column\n return df\n else:\n msg = \"Wrong strategy has been passed\"\n raise TypeError(msg)\n\n def transform(self, df):\n \"\"\"\n Only if the class is fitted with 'ignore_row' or 'ignore_column' strategies.\n\n :param df: pandas dataframe\n :return: transformed data frame based on the mask vector from fit_transform method.\n \"\"\"\n if self.strategy == 'ignore_row':\n return df[self.mask]\n elif self.strategy == 'ignore_column':\n return df.loc[:,self.mask]\n else:\n msg = \"The transform method doesn't change the dataframe if strategy='zero' or 'interpolate'. 
You should fit_transform the new dataframe with those methods.\"\n warnings.warn(msg)\n","sub_path":"cheml/preprocessing/handle_missing.py","file_name":"handle_missing.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"134285291","text":"#how to Define tuple\n\ntp=()\n\n#possible to store same and different type of data\n\ntple=(10,\"sibin\",True)\nprint(tple )\n\n#it possibe to store duplicate value\n\ntple1=(10,\"sibin\",True,10,\"sibin\",True)\nprint(tple1)\n\n# insertion order is preserved\n\n#tuple is immutable means that can't update","sub_path":"python collections/Tuple/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"158105759","text":"#!/usr/bin/env python\n# -*-coding:utf_8-*-\n\nimport random\nimport time\n\n\n# Class created to get the mock values.\nclass Sensorslibrary(object):\n \"\"\"Sensorslibrary\"\"\"\n\n def __init__(self):\n super(Sensorslibrary, self).__init__()\n\n @staticmethod\n def nfc():\n time.sleep(1)\n if random.randint(1, 4) == 3:\n return random.choice(['ABCD136468', 'BCDE789514', 'CDEF663247'])\n return None\n\n @staticmethod\n def flow():\n time.sleep(1)\n flow = 0.0\n a = 15\n while a > 0:\n flow += random.random()\n a -= 1\n time.sleep(1)\n return flow\n","sub_path":"sensors/sensorslibrary.py","file_name":"sensorslibrary.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"532262398","text":"import urllib.request\nimport urllib.parse\nimport random\nimport hashlib\nfrom gtk import *\n\ndef __translate(lin, lout, fy, text):\n\tif text == '':\n\t\treturn ''\n\tif fy == 'Baidu':\n\t\tlin = list_b2[lin]\n\t\tlout = list_b2[lout]\n\t\turl = 'http://fanyi.baidu.com/transapi'\n\t\tdata = {\"query\": text, 'from': lin, 'to': lout}\n\t\theaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36', 'method': 'POST'}\n\t\tdata = urllib.parse.urlencode(data).encode('utf-8')\n\t\treq = urllib.request.Request(url, data, headers)\n\t\tresponse = urllib.request.urlopen(req)\n\t\thtml = response.read().decode('utf-8')\n\telif fy == 'Google':\n\t\tlin = list_g2[lin]\n\t\tlout = list_g2[lout]\n\t\turl = 'https://translate.google.cn/translate_a/single'\n\t\tdata = {\"q\": text}\n\t\tparams = {'client': 't', 'sl': lin, 'tl': lout, 'hl': 'en','dt': 'at', 'dt': 'bd', 'dt': 'ex', 'dt': 'ld', 'dt': 'md','dt': 'qca', 'dt': 'rw', 'dt': 'rm', 'dt': 'ss', 'dt': 't','ie': 'UTF-8', 'oe': 'UTF-8', 'source': 'bh', 'ssel': '0','tsel': '0', 'kc': '1', 'tk': ''}\n\t\theaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36','Referer': 'https://translate.google.cn/'}\n\t\tparams['tk'] = TK.get_tk(text)\n\t\t#data = urllib.parse.urlencode(data).encode('utf-8')\n\t\tres = requests.post(url, headers = headers, data = data, params = params)\n\t\t#res.raise_for_status()\n\t\thtml = res.text\n\telse: \n\t\tlin = list_y2[lin]\n\t\tlout = list_y2[lout]\n\t\t#print(lin,lout)\n\t\turl= 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\n\t\t# 发送给有道服务器的数据\n\t\tu = 'fanyideskweb'\n\t\tf = str(int(time.time()*1000) + random.randint(1,10))\n\t\tc = 'ebSeFb%=XZ%T[KZ)c(sy!'\n\t\tsign = hashlib.md5((u + text + f + c).encode('utf-8')).hexdigest()\n\t\tdata = {'i': 
text,'from':lin,'to':lout,'salt': f, 'sign': sign,'client': u, 'doctype': 'json','keyfrom': 'fanyi.web'}\n\t\theaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36', 'method': 'POST','Referer': 'http://fanyi.youdao.com/',}\n\t\tdata = urllib.parse.urlencode(data).encode('utf-8')\n\t\treq = urllib.request.Request(url, data, headers)\t\n\t\tresponse = urllib.request.urlopen(req)\n\t\thtml = response.read().decode('utf-8')\n\tif fy == 'Baidu':\n\t\treturn json.loads(html)['data'][0]['dst']\n\telif fy == 'Google':\n\t\treturn json.loads(html)[0][0][0]\n\telse: \n\t\t#print(json.loads(html))\n\t\treturn json.loads(html)['translateResult'][0][0]['tgt']\n\t\t\nTK = CalcTk()\n\nlist_b1 =['中文','英语','粤语','文言文','日语','韩语','法语','西班牙语','泰语','阿拉伯语',\n'俄语','葡萄牙语','德语','意大利语','希腊语','荷兰语','波兰语','保加利亚语','爱沙尼亚语','丹麦语','芬兰语',\n'捷克语','罗马尼亚语','斯洛语尼亚语','瑞典语','匈牙利语','繁体中文','越南语']\n\nlist_b2 = {'auto':'auto','中文':'zh','英语':'en','粤语':'yue','文言文':'wyw','日语':'jp','韩语':'kor','法语':'fra','西班牙语':'spa','泰语':'th','阿拉伯语':'ara',\n'俄语':'ru','葡萄牙语':'pt','德语':'de','意大利语':'it','希腊语':'el','荷兰语':'nl','波兰语':'pl','保加利亚语':'bul','爱沙尼亚语':'est',\n'丹麦语':'dan','芬兰语':'fin','捷克语':'cs','罗马尼亚语':'rom','斯洛语尼亚语':'slo','瑞典语':'swe','匈牙利语':'hu','繁体中文':'cht','越南语':'vie'}\n\nlist_y1 = ['中文','日语','英语','韩语','法语','阿拉伯语','波兰语','丹麦语','德语','俄语','芬兰语',\n'荷兰语','捷克语','罗马尼亚语','挪威语','葡萄牙语','瑞典语','斯洛伐克语','西班牙语','印地语',\n'印度尼西亚语','意大利语','泰语','土耳其语','希腊语','匈牙利语']\n\nlist_y2 ={'auto':'auto','中文':'zh-CHS','日语':'ja','英语':'EN','韩语':'ko','法语':'fr','阿拉伯语':'ar','波兰语':'pl','丹麦语':'da','德语':'de','俄语':'ru','芬兰语':'fi',\n'荷兰语':'nl','捷克语':'cs','罗马尼亚语':'ro','挪威语':'no','葡萄牙语':'pt','瑞典语':'sv','斯洛伐克语':'sk','西班牙语':'es','印地语':'hi',\n'印度尼西亚语':'id','意大利语':'it','泰语':'th','土耳其语':'tr','希腊语':'el','匈牙利语':'hu'}\n\nlist_g1 = 
['中文','中文(简体)','中文(繁体)','英语','南非语','俄语','法语','阿拉伯语','意大利语','日语','丹麦语','德语',\n'希腊语','世界语','西班牙语','爱沙尼亚语','巴士克语','法斯语','芬兰语','法罗语','加里西亚语','古吉拉特语','阿塞拜疆语','比利时语','保加利亚语','加泰隆语','捷克语',\n'希伯来语','印地语','克罗地亚语','匈牙利语','亚美尼亚语','印度尼西亚语','冰岛语','格鲁吉亚语','哈萨克语','卡纳拉语','朝鲜语','孔卡尼语','吉尔吉斯语',\n'立陶宛语','拉脱维亚语','毛利语','马其顿语','蒙古语','马拉地语','马来语','马耳他语','挪威语','荷兰语','北梭托语','威尔士语','第维埃语',\n'旁遮普语','波兰语','葡萄牙语','克丘亚语','罗马尼亚语','梵文','北萨摩斯语','斯洛伐克语','斯洛文尼亚语','阿尔巴尼亚语','瑞典语','斯瓦希里语','叙利亚语',\n'泰米尔语','泰卢固语','泰语','塔加路语','茨瓦纳语','土耳其语','宗加语','鞑靼语','乌克兰语','乌都语','乌兹别克语','越南语',\n'班图语','祖鲁语']\nlist_g2 = {'auto':'auto','南非语':'af','阿拉伯语':'ar','阿塞拜疆语':'az','比利时语':'be','保加利亚语':'bg','加泰隆语':'ca','捷克语':'cs','威尔士语':'cy','丹麦语':'da','德语':'de','第维埃语':'dv',\n'希腊语':'el','英语':'en','世界语':'eo','西班牙语':'es','爱沙尼亚语':'et','巴士克语':'eu','法斯语':'fa','芬兰语':'fi','法罗语':'fo','法语':'fr','加里西亚语':'gl','古吉拉特语':'gu',\n'希伯来语':'he','印地语':'hi','克罗地亚语':'hr','匈牙利语':'hu','亚美尼亚语':'hy','印度尼西亚语':'id','冰岛语':'is','意大利语':'it','日语':'ja','格鲁吉亚语':'ka','哈萨克语':'kk','卡纳拉语':'kn','朝鲜语':'ko','孔卡尼语':'kok','吉尔吉斯语':'ky',\n'立陶宛语':'lt','拉脱维亚语':'lv','毛利语':'mi','马其顿语':'mk','蒙古语':'mn','马拉地语':'mr','马来语':'ms','马耳他语':'mt','挪威语':'nb','荷兰语':'nl','北梭托语':'ns',\n'旁遮普语':'pa','波兰语':'pl','葡萄牙语':'pt','克丘亚语':'qu','罗马尼亚语':'ro','俄语':'ru','梵文':'sa','北萨摩斯语':'se','斯洛伐克语':'sk','斯洛文尼亚语':'sl','阿尔巴尼亚语':'sq','瑞典语':'sv','斯瓦希里语':'sw','叙利亚语':'syr',\n'泰米尔语':'ta','泰卢固语':'te','泰语':'th','塔加路语':'tl','茨瓦纳语':'tn','土耳其语':'tr','宗加语':'ts','鞑靼语':'tt','乌克兰语':'uk','乌都语':'ur','乌兹别克语':'uz','越南语':'vi',\n'班图语':'xh','中文':'zh','中文(简体)':'zh-CN','中文(繁体)':'zh-TW','祖鲁语':'zu'}\n","sub_path":"trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"16075380","text":"import statistics\r\ndef make_bin(li):\r\n leng = len(li)\r\n count = 0\r\n final_bin = []\r\n bin_size = int(input(\"Enter bin size : - \"))\r\n rem = leng % bin_size\r\n\r\n if rem != 0:\r\n while(count < leng - rem):\r\n temp_li = []\r\n temp = 0\r\n while temp != bin_size:\r\n temp_li = temp_li + [li[temp + count]]\r\n temp = temp + 1\r\n final_bin.append(temp_li)\r\n count = count + bin_size\r\n\r\n count1 = 0\r\n temp_li = []\r\n\r\n while count1 != rem:\r\n temp_li = temp_li + [li[count]]\r\n\r\n count = count + 1\r\n count1 = count1 + 1\r\n else:\r\n while (count != leng):\r\n temp_li = []\r\n temp = 0\r\n while temp != bin_size:\r\n if li[temp + count] is None:\r\n break\r\n else:\r\n temp_li = temp_li + [li[temp + count]]\r\n temp = temp + 1\r\n final_bin.append(temp_li)\r\n count = count + bin_size\r\n final_bin.append(temp_li)\r\n\r\n return final_bin\r\n\r\ndef mean_bi(bi):\r\n l = len(bi)\r\n count = 0\r\n fi = []\r\n while count != l:\r\n temp = []\r\n le = len(bi[count])\r\n total = 0\r\n count2 = 0\r\n while count2 != le:\r\n total = total + bi[count][count2]\r\n count2 = count2 +1\r\n\r\n me = float(total / le)\r\n count3 = 0\r\n while count3 != le:\r\n temp = temp + [me]\r\n count3 = count3 + 1\r\n fi.append(temp)\r\n count = count + 1\r\n return fi\r\n\r\n\r\ndef median_bi(bi):\r\n l = len(bi)\r\n count = 0\r\n fi = []\r\n while count != l:\r\n temp = []\r\n\r\n le = len(bi[count])\r\n me = statistics.median(bi[count])\r\n count3 = 0\r\n while count3 != le:\r\n temp = temp + [me]\r\n count3 = count3 + 1\r\n fi.append(temp)\r\n count = count + 1\r\n return fi\r\n\r\ndef range_bi(bi):\r\n\r\n l = len(bi)\r\n count = 0\r\n fi = []\r\n while count != l:\r\n temp = []\r\n max_bi = max(bi[count])\r\n min_bi = min(bi[count])\r\n le = len(bi[count])\r\n\r\n count3 = 0\r\n while count3 != le:\r\n mi = abs(bi[count][count3]-min_bi)\r\n ma = abs(max_bi-bi[count][count3])\r\n if mi <= ma :\r\n temp = temp + [min_bi]\r\n elif mi 
> ma :\r\n temp = temp + [max_bi]\r\n\r\n count3 = count3 + 1\r\n fi.append(temp)\r\n count = count + 1\r\n return fi\r\n\r\n\r\nli = [13, 15, 16, 16, 19, 20, 20, 21, 22, 22, 25, 25, 25, 25, 30, 33, 33, 35, 5, 35, 35, 36, 40, 45, 46, 52, 70 ]\r\nbi = make_bin(li)\r\nprint(mean_bi(bi))\r\nprint(median_bi(bi))\r\nprint(range_bi(bi))\r\n\r\n","sub_path":"binning.py","file_name":"binning.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"169679759","text":"\"\"\"Tests for api module.\"\"\"\n\nimport pytest\nimport vcr\nimport os\nfrom shapely.geometry import Polygon\nfrom landsatxplore import api, errors\n\n\nBRUSSELS_AREA = Polygon(\n [(4.25, 50.75), (4.50, 50.75), (4.50, 50.95), (4.25, 50.95), (4.25, 50.75)]\n)\n\n\ndef test__random_string():\n str_a = api._random_string(length=10)\n str_b = api._random_string(length=10)\n assert str_a != str_b\n assert len(str_a) == 10\n assert len(str_b) == 10\n\n\ndef test_coordinate():\n coord = api.Coordinate(4.35, 50.85)\n assert coord == {\"longitude\": 4.35, \"latitude\": 50.85}\n\n\ndef test_geojson():\n geojson = api.GeoJson(BRUSSELS_AREA.__geo_interface__)\n assert geojson[\"type\"] == \"Polygon\"\n assert len(geojson[\"coordinates\"]) == 5\n assert geojson[\"coordinates\"][0] == {\"longitude\": 4.25, \"latitude\": 50.75}\n\n\ndef test_spatial_filter_mbr():\n mbr = api.SpatialFilterMbr(*BRUSSELS_AREA.bounds)\n assert mbr[\"filterType\"] == \"mbr\"\n assert mbr[\"lowerLeft\"] == {\"longitude\": 4.25, \"latitude\": 50.75}\n assert mbr[\"upperRight\"] == {\"longitude\": 4.5, \"latitude\": 50.95}\n\n\ndef test_spatial_filter_geojson():\n sfilter = api.SpatialFilterGeoJSON(BRUSSELS_AREA.__geo_interface__)\n assert sfilter[\"filterType\"] == \"geoJson\"\n\n\ndef test_acquisition_filter():\n afilter = api.AcquisitionFilter(\"2000-01-01\", \"2001-12-31\")\n assert afilter[\"start\"] == \"2000-01-01\"\n assert afilter[\"end\"] == \"2001-12-31\"\n\n\ndef test_cloud_cover_filter():\n cfilter = api.CloudCoverFilter(max=10)\n assert cfilter[\"min\"] == 0\n assert cfilter[\"max\"] == 10\n\n\ndef test_metadata_value():\n mfilter = api.MetadataValue(\n field_id=\"5e83d08fd4594aae\", value=\"LT05_L1GS_173058_20111028_20161005_01_T2\"\n )\n assert mfilter[\"filterType\"] == \"value\"\n assert mfilter[\"filterId\"] == \"5e83d08fd4594aae\"\n assert mfilter[\"value\"] == \"LT05_L1GS_173058_20111028_20161005_01_T2\"\n assert mfilter[\"operand\"] == 
\"like\"\n\n\n@pytest.fixture(scope=\"module\")\ndef ee_api():\n def _filter_credentials(request):\n if \"password\" in str(request.body):\n request.body = None\n return request\n\n with vcr.use_cassette(\n \"tests/fixtures/vcr_cassettes/api_login.yaml\",\n before_record_request=_filter_credentials,\n ):\n ee = api.API(\n os.getenv(\"LANDSATXPLORE_USERNAME\"), os.getenv(\"LANDSATXPLORE_PASSWORD\")\n )\n return ee\n\n\ndef test_api_login(ee_api):\n assert ee_api.session.headers.get(\"X-Auth-Token\")\n\n\ndef test_api_login_error():\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_login_error.yaml\"):\n with pytest.raises(errors.USGSAuthenticationError):\n api.API(\"bad_username\", \"bad_password\")\n\n\ndef test_api_get_scene_id(ee_api):\n\n # Single Product ID\n PRODUCT_ID = \"LT05_L1GS_173058_20111028_20161005_01_T2\"\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_scene_id.yaml\"):\n scene_id = ee_api.get_scene_id(PRODUCT_ID, dataset=\"landsat_tm_c1\")\n assert scene_id == \"LT51730582011301MLK00\"\n\n # Multiple Product IDs\n PRODUCT_IDS = [\n \"LT05_L1GS_173058_20111028_20161005_01_T2\",\n \"LT05_L1GS_173057_20010407_20171209_01_T2\",\n ]\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_scene_ids.yaml\"):\n scene_ids = ee_api.get_scene_id(PRODUCT_IDS, dataset=\"landsat_tm_c1\")\n assert scene_ids == [\"LT51730582011301MLK00\", \"LT51730572001097LBG00\"]\n\n\ndef test_api_metadata(ee_api):\n\n # Collection 1\n SCENE_ID = \"LT51730582011301MLK00\"\n DATASET = \"landsat_tm_c1\"\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_scene_metadata_c1.yaml\"):\n metadata = ee_api.metadata(SCENE_ID, DATASET)\n assert metadata[\"entityId\"] == SCENE_ID\n assert metadata[\"landsat_scene_id\"] == SCENE_ID\n\n # Collection 2\n SCENE_ID = \"LT51730582011301MLK00\"\n DATASET = \"landsat_tm_c2_l1\"\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_scene_metadata_c2.yaml\"):\n metadata = ee_api.metadata(SCENE_ID, DATASET)\n assert 
metadata[\"entityId\"] == SCENE_ID\n assert metadata[\"collection_number\"] == 2\n\n\ndef test_api_get_product_id(ee_api):\n\n SCENE_ID = \"LT51730582011301MLK00\"\n\n # Collection 1\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_productid_c1.yaml\"):\n product_id = ee_api.get_product_id(SCENE_ID, \"landsat_tm_c1\")\n assert product_id == \"LT05_L1GS_173058_20111028_20161005_01_T2\"\n\n # Collection 2\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_productid_c2.yaml\"):\n product_id = ee_api.get_product_id(SCENE_ID, \"landsat_tm_c2_l2\")\n assert product_id == \"LT05_L2SP_173058_20111028_20200820_02_T1\"\n\n\ndef test_api_search(ee_api):\n\n # Longitude and Latitude\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_search_c1_lonlat.yaml\"):\n scenes = ee_api.search(\n \"landsat_8_c1\",\n longitude=4.38,\n latitude=50.85,\n start_date=\"2018-01-01\",\n end_date=\"2018-01-07\",\n max_results=5,\n )\n assert len(scenes) >= 1\n assert \"cloudCover\" in scenes[0]\n\n # Bounding box\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_search_c1_bbox.yaml\"):\n scenes = ee_api.search(\n \"landsat_8_c1\",\n bbox=BRUSSELS_AREA.bounds,\n start_date=\"2018-01-01\",\n end_date=\"2018-01-07\",\n max_results=5,\n )\n assert len(scenes) >= 1\n assert \"cloudCover\" in scenes[0]\n\n # Collection 2\n with vcr.use_cassette(\"tests/fixtures/vcr_cassettes/api_search_c2.yaml\"):\n scenes = ee_api.search(\n \"landsat_ot_c2_l2\",\n longitude=4.38,\n latitude=50.85,\n start_date=\"2018-01-01\",\n end_date=\"2018-01-31\",\n max_results=10,\n )\n assert len(scenes) >= 1\n assert \"cloudCover\" in scenes[0]\n assert scenes[0][\"displayId\"][5:7] == \"L2\"\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"516194848","text":"from 二元文法 import read_txt\n\n\nclass caculate():\n \"\"\"\n 和上次一模一样\n \"\"\"\n def __init__(self, filename):\n #\n # jieba分词的结果\n self.true_correct_txt = read_txt(\"./txt/jieba.txt\")\n self.true_correct_num = len(self.true_correct_txt)\n # 这一步将列表转为字典,因为字典查询较快\n self.true_correct_dic = self.changeIntoDic(self.true_correct_txt)\n # 样本信息条数(除去标点符号,只保留中文)\n self.sample_num_txt = read_txt(filename)\n self.sample_num = len(self.sample_num_txt)\n\n self.my_correct_txt = self.getMyCorrectNum()\n self.my_correct_num = len(self.my_correct_txt)\n\n self.Precision = self.caculatePrecision()\n self.Recall = self.caculateRecall()\n self.FScore = self.caculateFScore()\n pass\n\n def changeIntoDic(self, txt):\n result = {}\n for t in txt:\n result[t] = 1\n return result\n\n def getMyCorrectNum(self):\n result = []\n for s in self.sample_num_txt:\n if s in self.true_correct_dic:\n result.append(s)\n return result\n\n def caculatePrecision(self):\n return self.my_correct_num / self.true_correct_num\n\n def caculateRecall(self):\n return self.my_correct_num / self.sample_num\n\n def caculateFScore(self):\n return (2 * self.Precision * self.Recall) / (self.Precision + self.Recall)\n\n\nif __name__ == '__main__':\n print(\"FMM:\")\n fmm = caculate(\"./txt/fmm.txt\")\n print(\"Precision:%f,Recall:%f,FScore:%f\" % (fmm.Precision, fmm.Recall, fmm.FScore))\n print(\"BMM:\")\n bmm = caculate(\"./txt/bmm.txt\")\n print(\"Precision:%f,Recall:%f,FScore:%f\" % (bmm.Precision, bmm.Recall, bmm.FScore))\n print(\"消除歧义后的:\")\n two_way = caculate(\"./txt/消歧结果.txt\")\n print(\"Precision:%f,Recall:%f,FScore:%f\" % (two_way.Precision, two_way.Recall, two_way.FScore))\n # print(\"mmseg:\")\n # mmseg = caculate(\"./data/MMSEG.txt\")\n # print(\"Precision:%f,Recall:%f,FScore:%f\" % (mmseg.Precision, mmseg.Recall, 
mmseg.FScore))\n","sub_path":"2元文法进行文本分词消歧/其他.py","file_name":"其他.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"99147194","text":"\nimport sys\nimport numpy as np\nimport pickle\nimport pdb\nimport matplotlib.pyplot as plt\nfrom syncUtils import getPhasesFromVoltages\nfrom circularStats import circularVariance\nfrom utils import alignMeasurements\n\ndef main(argv):\n def computeCircularVariances(phases1, phases2):\n cvs = np.empty(len(phases1))\n for i in xrange(len(phases1)):\n cvs[i] = circularVariance(angles=(phases1[i], phases2[i]))\n return cvs\n\n if len(argv)!=2:\n print(\"Usage: %s \"%argv[0])\n sys.exit(1)\n\n selfCouplingStrength = float(argv[1])\n epsilon = 0.1\n i0 = 10\n couplingStartTime = 99.04\n colorNeuron0 = \"blue\"\n colorNeuron1 = \"green\"\n linestyleCoupled = \"-\"\n linestyleUncoupled = \":\"\n integrationFilename = \"results/integrationWCoupledINapIKHighThresholdWithSelfCouplingStrength%.02fI0%.02fEpsilon%.06fCouplingStartTime%.02f.npz\"%(selfCouplingStrength, i0, epsilon, couplingStartTime)\n figFilenamePattern = \"figures/fig10_25INapIKHighThresholdWithSelfCouplingStrength%.02fI0%.02fEpsilon%.06fCouplingStart%.02f.%s\"\n\n figFilename = figFilenamePattern%(selfCouplingStrength, i0, epsilon, \n couplingStartTime, \n \"eps\")\n res = np.load(integrationFilename)\n timesCoupled = res[\"timesCoupled\"]\n voltagesCoupledNeuron0 = res[\"ysCoupled\"][0,:]\n voltagesCoupledNeuron1 = res[\"ysCoupled\"][2,:]\n voltagesUncoupledNeuron0 = res[\"ys0Uncoupled\"][0,:]\n timesUncoupledNeuron0 = res[\"times0Uncoupled\"]\n voltagesUncoupledNeuron1 = res[\"ys1Uncoupled\"][0,:]\n timesUncoupledNeuron1 = res[\"times1Uncoupled\"]\n sampleRate = 1.0/(timesCoupled[1]-timesCoupled[0])\n\n resPhasesNeuron0 = getPhasesFromVoltages(times=timesCoupled, \n voltages=voltagesCoupledNeuron0)\n phasesNeuron0 = resPhasesNeuron0[\"phases\"]\n timePhasesNeuron0 = resPhasesNeuron0[\"times\"]\n resPhasesNeuron1 = getPhasesFromVoltages(times=timesCoupled,\n voltages=voltagesCoupledNeuron1)\n phasesNeuron1 = resPhasesNeuron1[\"phases\"]\n timePhasesNeuron1 = 
resPhasesNeuron1[\"times\"]\n spikeTimesNeuron0 = resPhasesNeuron0[\"spikeTimes\"]\n uncoupledSpikeTimesNeuron0 = spikeTimesNeuron0[spikeTimesNeuron0[^/]+)/{self.service_path}/?(?P.*)?\"\n\n def add_existing(self, obj):\n return self.model.add_existing(obj)\n\n def create(self, request):\n obj = self._schema_draft().loads(request.body)\n data = self.model.add(obj)\n return create_response(request, json=data)\n\n def get_by_id(self, request, id):\n obj = self.model.get_by_id(id)\n if obj:\n return create_response(request, json=obj)\n return create_response(request, status_code=404)\n\n def get_by_key(self, request, key):\n obj = self.model.get_by_key(key)\n if obj:\n return create_response(request, json=obj)\n return create_response(request, status_code=404)\n\n def query(self, request):\n params = utils.parse_request_params(abstract.AbstractQuerySchema, request)\n results = self.model.query(params.get(\"where\"))\n total_count = len(results)\n if params.get(\"limit\"):\n results = results[: params[\"limit\"]]\n\n data = {\n \"count\": len(results),\n \"total\": total_count,\n \"offset\": 0,\n \"results\": self.model._resource_schema().load(results, many=True),\n }\n content = self._schema_query_response().dumps(data)\n return create_response(request, text=content)\n\n def update_by_id(self, request, id):\n obj = self.model.get_by_id(id)\n return self._update(request, obj)\n\n def update_by_key(self, request, key):\n obj = self.model.get_by_key(key)\n return self._update(request, obj)\n\n def delete_by_id(self, request, id):\n obj = self.model.get_by_id(id)\n if obj:\n response = self._validate_resource_version(request, obj)\n if response is not None:\n return response\n\n obj = self.model.delete_by_id(id)\n return create_response(request, json=obj)\n return create_response(request, status_code=404)\n\n def delete_by_key(self, request, key):\n obj = self.model.get_by_key(key)\n if obj:\n response = self._validate_resource_version(request, obj)\n if response is not 
None:\n return response\n\n obj = self.model.delete_by_key(key)\n return create_response(request, json=obj)\n return create_response(request, status_code=404)\n\n def _update(self, request, obj):\n if not obj:\n return create_response(request, status_code=404)\n\n update = self._schema_update().load(request.json())\n if update.actions:\n obj, err = self._apply_update_actions(obj, update)\n if err:\n return create_response(request, json=err, status_code=err[\"statusCode\"])\n return create_response(request, json=obj)\n\n def _validate_resource_version(self, request, obj):\n update_version = self._get_version_from_request(request)\n if update_version != obj[\"version\"]:\n data = self._create_version_error_response(obj[\"version\"])\n return create_response(request, json=data, status_code=409)\n\n def _get_version_from_request(self, request):\n version_data = request.qs.get(\"version\")\n if version_data:\n return int(version_data[0])\n return request.json().get(\"version\")\n\n def _apply_update_actions(self, obj, update):\n original_obj = obj\n\n for action in update.actions:\n func = self._actions.get(action.action)\n if not func:\n print(\"Missing action for\", action.action)\n continue\n try:\n obj = func(self, obj, action)\n except utils.InternalUpdateError as exc:\n return None, self._create_data_error_response(str(exc), obj)\n\n # Save the updated object to the model\n if obj != original_obj:\n if obj[\"version\"] != update.version:\n return None, self._create_version_error_response(obj[\"version\"])\n self.model.save(obj)\n\n # Temporary\n elif not self._actions:\n self.model.save(obj)\n\n return obj, None\n\n def _create_data_error_response(self, message, obj):\n return schemas.ErrorResponseSchema().dump(\n types.ErrorResponse(\n status_code=400,\n message=message,\n errors=[\n types.ConcurrentModificationError(\n message=message,\n current_version=obj['version']\n )\n ],\n )\n )\n\n\n def _create_version_error_response(self, version):\n return 
schemas.ErrorResponseSchema().dump(\n types.ErrorResponse(\n status_code=409,\n message=\"Version mismatch. Concurrent modification.\",\n errors=[\n types.ConcurrentModificationError(\n message=\"Version mismatch. Concurrent modification.\",\n current_version=version,\n )\n ],\n )\n )\n","sub_path":"src/commercetools/testing/abstract.py","file_name":"abstract.py","file_ext":"py","file_size_in_byte":9242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"291993060","text":"#!/usr/bin/env python\n\nimport json\n\nfrom . import mb\nfrom flask import render_template\nfrom flask import request\n\nimport os\nfrom algorithm_toolkit import app\n\nfrom sarpy.deprecated.tools import FrameGenerator\nfrom sarpy.deprecated.tools import FrameForm\n\ncam = FrameGenerator()\n\n\n@app.route('/taser/')\ndef index():\n form = FrameForm()\n \"\"\"Image Blending home page.\"\"\"\n return render_template('index.html', form=form)\n\n\n@mb.route('/taser/update_image_path', methods=['POST'])\ndef set_image_path():\n\n image_path = os.path.normpath(request.values.get('image_path', ''))\n tnx = int(request.values.get('tnx', ''))\n tny = int(request.values.get('tny', ''))\n\n nx, ny = cam.set_image_path(image_path, tnx, tny)\n\n return json.dumps({'nx': nx, 'ny': ny})\n\n\n@mb.route('/taser/update_image_content', methods=['POST'])\ndef crop_image():\n\n minx = int(round(float((request.values.get('minx', '')))))\n maxx = int(round(float((request.values.get('maxx', '')))))\n miny = int(round(float((request.values.get('miny', '')))))\n maxy = int(round(float((request.values.get('maxy', '')))))\n tnx = int(round(float((request.values.get('tnx', '')))))\n tny = int(round(float((request.values.get('tny', '')))))\n\n cam.crop_image(minx, miny, maxx, maxy, tnx, tny)\n\n return ''\n\n\n@mb.route('/taser/ortho_image', methods=['POST'])\ndef ortho_image():\n\n output_image_path = os.path.normpath(request.values.get('input', ''))\n cam.ortho_image(output_image_path)\n\n return ''\n\n\n@mb.route('/taser/get_frame', methods=['POST'])\ndef get_image():\n return cam.get_frame()\n","sub_path":"sarpy/deprecated/tools/taser_web/views/taser.py","file_name":"taser.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"605470639","text":"# -*-coding:UTF-8-*-\n\nimport heapq\nimport threading\n\nclass PriorityQueue:\n def __init__(self):\n self._queue = []\n self._count = 0\n self._cv = threading.Condition()\n\n def put(self, item, priority):\n with self._cv:\n heapq.heappush(self._queue, (-priority, self._count, item))\n self._count += 1\n self._cv.notify()\n\n def get(self):\n with self._cv:\n while len(self._queue)== 0:\n self._cv.wait()\n return heapq.heappop(self._queue)[-1]\n\nif __name__ == '__main__':\n pQueue = PriorityQueue()\n pQueue.put('a',1)\n print(pQueue.get())\n\n","sub_path":"prioriry_queue_demo.py","file_name":"prioriry_queue_demo.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"440997165","text":"# coding:utf8\n\"\"\"\n 123. 买卖股票的最佳时机 III\n 给定一个数组,它的第 i 个元素是一支给定的股票在第 i 天的价格。\n 设计一个算法来计算你所能获取的最大利润。你最多可以完成 两笔 交易。\n 注意:你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。\n\n 示例 1:\n 输入:prices = [3,3,5,0,0,3,1,4]\n 输出:6\n 解释:在第 4 天(股票价格 = 0)的时候买入,在第 6 天(股票价格 = 3)的时候卖出,这笔交易所能获得利润 = 3-0 = 3 。\n 随后,在第 7 天(股票价格 = 1)的时候买入,在第 8 天 (股票价格 = 4)的时候卖出,这笔交易所能获得利润 = 4-1 = 3 。\n 链接:https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-iii\n\"\"\"\nfrom typing import List\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n return self.maxProfit_v1(prices)\n def maxProfit_v1(self, prices: List[int]) -> int:\n \"\"\"\n dp table\n dp[i][k][j] = x 表示第i天, 交易次数为k, 持有股票状态为j的最大利润为x\n 其中 j = {0, 1}, 0 <= k <= K\n ans: max(dp[n - 1][k][0])\n 每次买入作为一笔交易\n dp[i][k][0] = max(dp[i - 1][k][0], dp[i - 1][k][1] + prices[i])\n dp[i][k][1] = max(dp[i - 1][k - 1][0] - prices[i], dp[i - 1][k][1])\n base case:\n dp[0][0][0] = 0\n dp[0][1][0] = -inf\n dp[0][0][1] = -inf\n dp[0][1][1] = -prices[0]\n \"\"\"\n if not prices:\n return 0\n k = 2\n dp = [[[0] * 2 for _ in range(k + 1)] for _ in range(len(prices))]\n dp[0][0][0] = 0\n dp[0][1][0] = 0\n dp[0][0][1] = float('-inf')\n dp[0][1][1] = -prices[0]\n dp[0][2][0] = 0\n dp[0][2][1] = -prices[0]\n \n for i in range(1, len(prices)):\n for j in range(1, k + 1):\n dp[i][j][0] = max(dp[i - 1][j][0], dp[i - 1][j][1] + prices[i])\n dp[i][j][1] = max(dp[i - 1][j - 1][0] - prices[i], dp[i - 1][j][1])\n return max(dp[-1][1][0], dp[-1][2][0], 0)\n\n\n\nif __name__ == '__main__':\n prices = [3, 3, 5, 0, 0, 3, 1, 4]\n prices = [1, 2, 3, 4, 5]\n obj = Solution()\n print(obj.maxProfit(prices))\n","sub_path":"suqing/fuckal/python/dp/best-time-to-buy-and-sell-stock-iii.py","file_name":"best-time-to-buy-and-sell-stock-iii.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"403527688","text":"# name.py\n# Walker M. White (wmw2)\n# August 30, 2015\n\"\"\"Module with a single, non-working function.\n\nThe function in this module has a bug (in the sense that it does not satisfy\nits specification). This allows us to show off debugging.\"\"\"\n\ndef last_name_first(n):\n \"\"\"Returns: copy of n but in the form 'last-name, first-name'\n \n Parameter n: the person's name\n Precondition: n is in the form 'first-name last-name'\n with one or more blanks between the two names no spaces\n in or \"\"\"\n \n end_first = n.find(' ')\n first = n[:end_first]\n last = n[end_first+1:]\n return last+', '+first\n\n","sub_path":"Python/lab05/presentation-08/presentation-08/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"428964021","text":"\n# Perovskites octahedral tilting extraction\n# based on Surf.Sci.602 3674 (2008)\n# http://dx.doi.org/10.1016/j.susc.2008.10.002\n# Author: Evgeny Blokhin\n#\n# KNOWN BUG: in some low-symmetry cases (\"batio3_lda_hw12d_160_to.out\"),\n# octahedra are not adjusted with the axes, and their distortion origin is unknown.\n# Even if the rotation is absent (i.e. pseudo-cubic structure),\n# an \"artificial\" rotation can be extracted\n\nfrom __future__ import division\nimport math\nfrom functools import reduce\n\nfrom numpy.linalg import norm\n\nfrom ase import Atom\n\nfrom tilde.core.common import ModuleError #, generate_xyz\nfrom tilde.core.constants import Perovskite_Structure\nfrom tilde.core.symmetry import SymmetryFinder\n\n\nclass Perovskite_tilting():\n OCTAHEDRON_BOND_LENGTH_LIMIT = 2.5 # Angstrom\n OCTAHEDRON_ATOMS_Z_DIFFERENCE = 1.6 # Angstrom\n MAX_TILTING_DEGREE = 22.4 # degrees, this is for adjusting, may produce unphysical results\n\n def __init__(self, tilde_calc):\n self.prec_angles = {} # non-rounded, non-unique, all-planes angles\n self.angles = {} # rounded, unique, one-plane angles\n\n symm = SymmetryFinder()\n symm.refine_cell(tilde_calc)\n if symm.error:\n raise ModuleError(\"Cell refinement error: %s\" % symm.error)\n\n # check if the longest axis is Z, rotate otherwise\n lengths = list(map(norm, symm.refinedcell.cell)) # Py3\n if not (lengths[2] - lengths[0] > 1E-6 and lengths[2] - lengths[1] > 1E-6):\n axnames = {0: 'x', 1: 'y'}\n principal_ax = axnames[ lengths.index(max(lengths[0], lengths[1])) ]\n symm.refinedcell.rotate(principal_ax, 'z', rotate_cell = True)\n\n self.virtual_atoms = symm.refinedcell.copy()\n\n #with open('tilting.xyz', 'w') as f:\n # f.write(generate_xyz(self.virtual_atoms))\n\n # translate atoms around octahedra in all directions\n shift_dirs = [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (1, 1, 0), (1, -1, 0), (-1, -1, 0), (-1, 1, 0), (0, 0, 1), (0, 0, -1)]\n\n for k, i in 
enumerate(symm.refinedcell):\n if i.symbol in Perovskite_Structure.C:\n for sdir in shift_dirs:\n self.translate(k, symm.refinedcell.cell, sdir, self.virtual_atoms)\n\n # extract octahedra and their main tilting planes\n for octahedron in self.get_octahedra(symm.refinedcell, symm.refinedcell.periodicity):\n #print 'octahedron:', octahedron[0]+1 #, self.virtual_atoms[octahedron[0]].symbol, self.virtual_atoms[octahedron[0]].x, self.virtual_atoms[octahedron[0]].y, self.virtual_atoms[octahedron[0]].z\n #print 'corners:', [i+1 for i in octahedron[1]]\n\n # Option 1. Extract only one tilting plane, the closest to perpendicular to Z-axis\n '''tiltplane = self.get_tiltplane(octahedron[1])\n if len(tiltplane) == 4:\n t = self.get_tilting(tiltplane)\n #print 'result:', [i+1 for i in tiltplane], t\n self.prec_angles.update( { octahedron[0]: [ t ] } )'''\n\n # Option 2. Extract all three possible tilting planes,\n # try to spot the closest to perpendicular to Z-axis\n # and consider the smallest tilting\n plane_tilting = []\n for oplane in self.get_tiltplanes(octahedron[1]):\n t = self.get_tilting(oplane)\n #print \"result:\", [i+1 for i in oplane], t\n plane_tilting.append( t )\n\n self.prec_angles.update( { octahedron[0]: plane_tilting } )\n\n if not self.prec_angles: raise ModuleError(\"Cannot find any main tilting plane!\")\n\n # uniquify and round self.prec_angles to obtain self.angles\n u, todel = [], []\n for o in self.prec_angles:\n self.prec_angles[o] = reduce(lambda x, y: x if sum(x) <= sum(y) else y, self.prec_angles[o]) # only minimal angles are taken if tilting planes vary!\n self.prec_angles[o] = list(map(lambda x: list(map(lambda y: round(y, 2), x)), [self.prec_angles[o]])) # Py3\n for i in self.prec_angles[o]:\n u.append([o] + i)\n\n u = sorted(u, key=lambda x:x[0])\n u.reverse() # to make index of oct.centers minimal\n for i in u:\n for j in range(u.index(i)+1, len(u)):\n if i[1:] == u[j][1:]:\n todel.append(u.index(i))\n continue\n for i in [j for j in u if 
u.index(j) not in todel]:\n self.angles[ i[0]+1 ] = i[1:] # atomic index is counted from zero!\n\n def translate(self, num_of_atom, cell, components, reference):\n a_component, b_component, c_component = components\n reference.append(Atom(\n reference[num_of_atom].symbol,\n (reference[num_of_atom].x + a_component * cell[0][0] + b_component * cell[1][0] + c_component * cell[2][0],\n reference[num_of_atom].y + a_component * cell[0][1] + b_component * cell[1][1] + c_component * cell[2][1],\n reference[num_of_atom].z + a_component * cell[0][2] + b_component * cell[1][2] + c_component * cell[2][2])\n ))\n\n def get_bisector_point(self, num_of_A, num_of_O, num_of_B, reference):\n xA = reference[num_of_A].x\n yA = reference[num_of_A].y\n zA = reference[num_of_A].z\n xO = reference[num_of_O].x\n yO = reference[num_of_O].y\n zO = reference[num_of_O].z\n xB = reference[num_of_B].x\n yB = reference[num_of_B].y\n zB = reference[num_of_B].z\n m = self.virtual_atoms.get_distance(num_of_O, num_of_A)\n n = self.virtual_atoms.get_distance(num_of_O, num_of_B)\n\n # bisector length\n l = 2 * m * n * math.cos(math.radians(self.virtual_atoms.get_angle(num_of_A, num_of_O, num_of_B) / 2)) / (m + n)\n v = math.sqrt(n**2 - n * l**2 / m)\n u = m * v / n\n A = yA*(zO - zB) + yO*(zB - zA) + yB*(zA - zO)\n B = zA*(xO - xB) + zO*(xB - xA) + zB*(xA - xO)\n C = xA*(yO - yB) + xO*(yB - yA) + xB*(yA - yO)\n if C == 0: C = 1E-10 # prevent zero division\n D = xA*(yO*zB - yB*zO) + xO*(yB*zA - yA*zB) + xB*(yA*zO - yO*zA)\n D *= -1\n\n # from surface analytical equation\n x = (xA + u*xB/v)/(1+u/v)\n y = (yA + u*yB/v)/(1+u/v)\n z = -((A*x + B*y + D) / C)\n return [x, y, z]\n\n def get_octahedra(self, atoms, periodicity=3):\n '''\n Extract octahedra as lists of sequence numbers of corner atoms\n '''\n octahedra = []\n for n, i in enumerate(atoms):\n found = []\n if i.symbol in Perovskite_Structure.B:\n for m, j in enumerate(self.virtual_atoms):\n if j.symbol in Perovskite_Structure.C and 
self.virtual_atoms.get_distance(n, m) <= self.OCTAHEDRON_BOND_LENGTH_LIMIT:\n found.append(m)\n\n if (periodicity == 3 and len(found) == 6) or (periodicity == 2 and len(found) in [5, 6]):\n octahedra.append([n, found])\n\n if not len(octahedra): raise ModuleError(\"Cannot extract valid octahedra: not enough corner atoms found!\")\n return octahedra\n\n def get_tiltplane(self, sequence):\n '''\n Extract the main tilting plane basing on Z coordinate\n '''\n sequence = sorted(sequence, key=lambda x: self.virtual_atoms[ x ].z)\n in_plane = []\n for i in range(0, len(sequence)-4):\n if abs(self.virtual_atoms[ sequence[i] ].z - self.virtual_atoms[ sequence[i+1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \\\n abs(self.virtual_atoms[ sequence[i+1] ].z - self.virtual_atoms[ sequence[i+2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \\\n abs(self.virtual_atoms[ sequence[i+2] ].z - self.virtual_atoms[ sequence[i+3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE:\n in_plane = [sequence[j] for j in range(i, i+4)]\n return in_plane\n\n def get_tiltplanes(self, sequence):\n '''\n Extract tilting planes basing on distance map\n '''\n tilting_planes = []\n distance_map = []\n\n for i in range(1, len(sequence)):\n distance_map.append([ sequence[i], self.virtual_atoms.get_distance( sequence[0], sequence[i] ) ])\n\n distance_map = sorted(distance_map, key=lambda x: x[1])\n\n if len(distance_map) == 4:\n # surface edge case\n # semi-octahedron at surface edge has only one tilting plane to consider\n sorted_dist = [i[0] for i in distance_map]\n if distance_map[-1][1] - distance_map[-2][1] < 0.5:\n # 1st case: max diff < 0.5 Angstrom,\n # meaning all distances to reference atom are similar,\n # therefore the reference atom is above the searched plane\n # and the searched plane consists of other atoms\n tilting_planes.append( [ i[0] for i in distance_map ] )\n else:\n # 2nd case: reference atom belongs to the searched plane,\n # procedure needs to be repeated with the next atom as 
reference atom\n candidates = [sequence[0], sorted_dist[-1]]\n next_distance_map = []\n next_distance_map.append([ sorted_dist[1], self.virtual_atoms.get_distance( sorted_dist[0], sorted_dist[1] ) ])\n next_distance_map.append([ sorted_dist[2], self.virtual_atoms.get_distance( sorted_dist[0], sorted_dist[2] ) ])\n next_distance_map = sorted(next_distance_map, key=lambda x: x[1])\n next_sorted_dist = [i[0] for i in next_distance_map]\n\n # the next reference atom is taken above the plane (distances are similar)\n if next_distance_map[1][1] - next_distance_map[0][1] < 0.5: candidates.extend([ next_sorted_dist[0], next_sorted_dist[1] ])\n\n # the next reference atom is taken in the plane (distances are different)\n else: candidates.extend([ sorted_dist[0], next_sorted_dist[1] ])\n tilting_planes.append(candidates)\n\n elif len(distance_map) == 5:\n # full octahedron case\n # full octahedron has 3 different tilting planes (perpendicular in ideal case)\n sorted_dist = [i[0] for i in distance_map]\n\n # 1st plane is found as:\n first_plane = sorted_dist[0:4]\n tilting_planes.append(first_plane)\n distance_map_first_plane = []\n for i in range(1, 4):\n distance_map_first_plane.append([ first_plane[i], self.virtual_atoms.get_distance( first_plane[0], first_plane[i] ) ])\n distance_map_first_plane = sorted(distance_map_first_plane, key=lambda x: x[1])\n sorted_first_plane = [i[0] for i in distance_map_first_plane]\n\n # 2nd and 3rd planes are found as:\n tilting_planes.append([ sequence[0], sorted_dist[4], first_plane[0], sorted_first_plane[2] ])\n tilting_planes.append([ sequence[0], sorted_dist[4], sorted_first_plane[0], sorted_first_plane[1] ])\n\n # filter planes by Z according to octahedral spatial compound\n filtered = list(filter(lambda x:\n abs(self.virtual_atoms[ x[0] ].z - self.virtual_atoms[ x[1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \\\n abs(self.virtual_atoms[ x[1] ].z - self.virtual_atoms[ x[2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \\\n 
abs(self.virtual_atoms[ x[2] ].z - self.virtual_atoms[ x[3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE,\n tilting_planes\n )) # Py3\n if len(filtered): tilting_planes = filtered\n\n return tilting_planes\n\n def get_tilting(self, oplane):\n '''\n Main procedure\n '''\n surf_atom1, surf_atom2, surf_atom3, surf_atom4 = oplane\n\n # divide surface atoms into groups by distance between them\n compare = [surf_atom2, surf_atom3, surf_atom4]\n distance_map = []\n\n for i in range(0, 3):\n distance_map.append([ compare[i], self.virtual_atoms.get_distance(surf_atom1, compare[i]) ])\n\n distance_map = sorted(distance_map, key=lambda x: x[1])\n\n distance_map_keys = [i[0] for i in distance_map]\n surf_atom3 = distance_map_keys[2]\n surf_atom2 = distance_map_keys[1]\n surf_atom4 = distance_map_keys[0]\n\n if self.virtual_atoms[surf_atom1].z == self.virtual_atoms[surf_atom2].z and \\\n self.virtual_atoms[surf_atom2].z == self.virtual_atoms[surf_atom3].z and \\\n self.virtual_atoms[surf_atom3].z == self.virtual_atoms[surf_atom4].z:\n # this is done to prevent false zero tilting\n self.virtual_atoms[surf_atom1].z += 1E-10\n self.virtual_atoms[surf_atom2].z += 1E-10\n self.virtual_atoms[surf_atom3].z -= 1E-10\n self.virtual_atoms[surf_atom4].z -= 1E-10\n\n # new axes will be defined simply as vectors standing on 1 - 3 and 2 - 4 (they are moved to the point of origin)\n self.virtual_atoms.append(Atom('X', (self.virtual_atoms[surf_atom1].x - self.virtual_atoms[surf_atom3].x, self.virtual_atoms[surf_atom1].y - self.virtual_atoms[surf_atom3].y, self.virtual_atoms[surf_atom1].z - self.virtual_atoms[surf_atom3].z)))\n self.virtual_atoms.append(Atom('X', (self.virtual_atoms[surf_atom2].x - self.virtual_atoms[surf_atom4].x, self.virtual_atoms[surf_atom2].y - self.virtual_atoms[surf_atom4].y, self.virtual_atoms[surf_atom2].z - self.virtual_atoms[surf_atom4].z)))\n self.virtual_atoms.append(Atom('X', (0, 0, 0)))\n\n # redefine tilted axes\n surf_atom_first = len(self.virtual_atoms)-3\n 
surf_atom_second = len(self.virtual_atoms)-2\n center = len(self.virtual_atoms)-1\n\n # inverse arbitrary atom\n self.virtual_atoms.append(Atom('X', (-self.virtual_atoms[surf_atom_first].x, -self.virtual_atoms[surf_atom_first].y, -self.virtual_atoms[surf_atom_first].z)))\n inversed_one = len(self.virtual_atoms)-1\n\n # find and add bisectors, silly swapping\n first_bisector = self.get_bisector_point(surf_atom_first, center, surf_atom_second, self.virtual_atoms)\n sec_bisector = self.get_bisector_point(surf_atom_second, center, inversed_one, self.virtual_atoms)\n\n swap = True\n if first_bisector[0] < 0 and sec_bisector[0] < 0:\n swap = False\n if first_bisector[0] < 0:\n first_bisector[0] *= -1\n first_bisector[1] *= -1\n first_bisector[2] *= -1\n if sec_bisector[0] < 0:\n sec_bisector[0] *= -1\n sec_bisector[1] *= -1\n sec_bisector[2] *= -1\n if swap:\n first_bisector, sec_bisector = sec_bisector, first_bisector\n\n swap = False\n if first_bisector[0] < sec_bisector[0] and first_bisector[1] < 0:\n first_bisector[0] *= -1\n first_bisector[1] *= -1\n first_bisector[2] *= -1\n swap = True\n if first_bisector[0] < sec_bisector[0] and first_bisector[1] > 0:\n swap = True\n if first_bisector[0] > sec_bisector[0] and sec_bisector[1] < 0:\n sec_bisector[0] *= -1\n sec_bisector[1] *= -1\n sec_bisector[2] *= -1\n if swap:\n first_bisector, sec_bisector = sec_bisector, first_bisector\n\n self.virtual_atoms.append(Atom('X', (first_bisector[0], first_bisector[1], first_bisector[2])))\n self.virtual_atoms.append(Atom('X', (sec_bisector[0], sec_bisector[1], sec_bisector[2])))\n first_bisector = len(self.virtual_atoms)-2\n sec_bisector = len(self.virtual_atoms)-1\n\n # use vector cross product to define normal which will play Z axis role\n self.virtual_atoms.append(Atom('X', (\n self.virtual_atoms[first_bisector].y*self.virtual_atoms[sec_bisector].z - self.virtual_atoms[first_bisector].z*self.virtual_atoms[sec_bisector].y,\n 
self.virtual_atoms[first_bisector].z*self.virtual_atoms[sec_bisector].x - self.virtual_atoms[first_bisector].x*self.virtual_atoms[sec_bisector].z,\n self.virtual_atoms[first_bisector].x*self.virtual_atoms[sec_bisector].y - self.virtual_atoms[first_bisector].y*self.virtual_atoms[sec_bisector].x\n )))\n tilt_z = len(self.virtual_atoms)-1\n\n # Euler angles ZYZ\n alpha = math.degrees(math.atan2(self.virtual_atoms[sec_bisector].z, self.virtual_atoms[first_bisector].z))\n beta = math.degrees(math.atan2(math.sqrt(self.virtual_atoms[tilt_z].x**2 + self.virtual_atoms[tilt_z].y**2), self.virtual_atoms[tilt_z].z))\n gamma = math.degrees(math.atan2(self.virtual_atoms[tilt_z].y, -self.virtual_atoms[tilt_z].x))\n\n # angles adjusting\n adjust_angles = [45, 90, 135, 180, 225, 270, 315, 360]\n tilting = [alpha, beta, gamma]\n for i in range(0, 3):\n tilting[i] = abs(tilting[i])\n if tilting[i] in adjust_angles:\n tilting[i] = 0.0\n continue\n\n if tilting[i] > self.MAX_TILTING_DEGREE:\n for checkpoint in adjust_angles:\n if checkpoint - self.MAX_TILTING_DEGREE < tilting[i] < checkpoint + self.MAX_TILTING_DEGREE:\n tilting[i] = abs(tilting[i] - checkpoint)\n break\n return tilting\n","sub_path":"tilde/apps/perovskite_tilting/perovskite_tilting.py","file_name":"perovskite_tilting.py","file_ext":"py","file_size_in_byte":17287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"449758878","text":"\"\"\"\nImporting the required libraries\n\"\"\"\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\n\"\"\"\nWhen running from the terminal following arguments have to be specified\n\"\"\"\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True,\n help=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to Caffe pre-trained model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.2,\n help=\"minimum probability to filter weak detections\")\nap.add_argument(\"-i\", \"--shape\", type=str, required=True,\n help=\"path to the image that containes shape\")\nap.add_argument(\"-o\", \"--output\", type=str,\n\thelp=\"path to optional output video file\")\nargs = vars(ap.parse_args())\n\"\"\"\nClasses here characterizes the objects that this particular code can recognize\n\"\"\"\nCLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"]\nCOLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\"\"\"\nLoading the prototext and caffemodel\n\"\"\"\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\"\"\"\ninitialize the video stream, allow the camera sensor to warmup,\nand initialize the FPS counter\n\"\"\"\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\ntime.sleep(2.0)\nfps = FPS().start()\n\ndef nothing(x):\n # any operation\n pass\n\n\"\"\"\nDefining the trackers for the frame so as to manually change on which color to detect\n\"\"\"\ncap = cv2.VideoCapture(0)\ncv2.namedWindow(\"Trackbars\")\ncv2.createTrackbar(\"L-H\", \"Trackbars\", 0, 180, 
nothing)\ncv2.createTrackbar(\"L-S\", \"Trackbars\", 66, 255, nothing)\ncv2.createTrackbar(\"L-V\", \"Trackbars\", 134, 255, nothing)\ncv2.createTrackbar(\"U-H\", \"Trackbars\", 180, 180, nothing)\ncv2.createTrackbar(\"U-S\", \"Trackbars\", 255, 255, nothing)\ncv2.createTrackbar(\"U-V\", \"Trackbars\", 243, 255, nothing)\n\"\"\"\nTaking as an input the shape to be detected.\nAlso finding it contours.\n\"\"\"\npath = args[\"shape\"]\nimg = cv2.imread(path,0)\nret, thresh = cv2.threshold(img, 127,255,0)\ncontours, _ = cv2.findContours(thresh, 2,1)\ncnt1 = contours[0]\n\"\"\"\ninitializing the writer object\n\"\"\"\nwriter = None\nwhile True:\n \"\"\"\n getting the frame from the webcam/ipcam and resizing it to a specific size\n \"\"\"\n frame = vs.read()\n frame = imutils.resize(frame, width=1000)\n \"\"\"\n grab the frame dimensions and convert it to a blob\n \"\"\"\n (h, w) = frame.shape[:2]\n \"\"\"\n specifying the video format\n \"\"\"\n if args[\"output\"] is not None and writer is None:\n fourcc = cv2.VideoWriter_fourcc(*\"MP4V\")\n writer = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n (w, h), True)\n \"\"\"\n coverting the fram from BGR to HSV\n \"\"\"\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \"\"\"\n getting the pointer position from the trackbar\n \"\"\"\n l_h = cv2.getTrackbarPos(\"L-H\", \"Trackbars\")\n l_s = cv2.getTrackbarPos(\"L-S\", \"Trackbars\")\n l_v = cv2.getTrackbarPos(\"L-V\", \"Trackbars\")\n u_h = cv2.getTrackbarPos(\"U-H\", \"Trackbars\")\n u_s = cv2.getTrackbarPos(\"U-S\", \"Trackbars\")\n u_v = cv2.getTrackbarPos(\"U-V\", \"Trackbars\")\n \"\"\"\n specifying the range of yellow color to be detected\n \"\"\"\n lower_red = np.array([20, 110, 110])\n upper_red = np.array([40, 255, 255])\n \"\"\"\n specifying the range of the color dynamically\n \"\"\"\n lower = np.array([l_h,l_s,l_v])\n upper = np.array([u_h,u_s,u_v])\n \"\"\"\n the following snippet will show a blacked out frame.\n This frame highlights the portion of the frame 
where color is detected.\n \"\"\"\n mask = cv2.inRange(hsv, lower, upper)\n kernel = np.ones((5, 5), np.uint8)\n mask = cv2.erode(mask, kernel)\n font = cv2.FONT_HERSHEY_COMPLEX\n \"\"\"\n drawing the line at the center\n \"\"\"\n cv2.line(frame, (0, h//2), (w, h//2), (0, 255, 0), 2)\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),\n 0.007843, (300, 300), 127.5)\n \"\"\"\n pass the blob through the network and obtain the detections and predictions\n \"\"\"\n net.setInput(blob)\n detections = net.forward()\n \"\"\"\n loop over the detections\n \"\"\"\n for i in np.arange(0, detections.shape[2]):\n \"\"\"\n Extracting the confidence\n \"\"\"\n confidence = detections[0, 0, i, 2]\n \"\"\"\n removing the unnecesaary detections based on the confidence\n \"\"\"\n if confidence > args[\"confidence\"]:\n \"\"\"\n extract the index of the class label from the\n detections`, then compute the (x, y)-coordinates of\n the bounding box for the object\n \"\"\"\n idx = int(detections[0, 0, i, 1])\n if CLASSES[idx] != \"bottle\":\n continue\n \"\"\"\n drawing the detection on the fram using a rectangle border.\n tracking this border to check whether the bottle moves\n beyond the specified limits\n \"\"\"\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\") \n label = \"{}: {:.2f}%\".format(CLASSES[idx],\n confidence * 100)\n cv2.rectangle(frame, (startX, startY), (endX, endY),\n COLORS[idx], 2) \n center = startY+endY/2\n if center > h//2:\n cv2.putText(frame, 'BOTTLE CROSSED', (500, h-40), \n cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 2) \n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(frame, label, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2) \n\n \"\"\"\n checkng the cv2 version installed in the system\n and getting the contours from the blacked out frame\n \"\"\"\n if int(cv2.__version__[0]) > 3:\n contours, _ = cv2.findContours(mask, 2, 1)\n else:\n _, contours, _ = 
cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \"\"\"\n looping over the contours\n \"\"\"\n for cnt in contours:\n \"\"\"\n calculating the area\n approximating the points to identify the shapes\n \"\"\"\n area = cv2.contourArea(cnt)\n approx = cv2.approxPolyDP(cnt, 0.02*cv2.arcLength(cnt, True), True)\n x = approx.ravel()[0]\n y = approx.ravel()[1]\n \"\"\"\n the shape detected is then compared with the base shape 'star'\n \"\"\"\n dist = cv2.matchShapes(cnt, cnt1, 1, 0.0)\n \"\"\"\n if the shape is matched, it will draw the borders around the shape \n using cv2 and a rectangle box keeping the shape in the center. \n Also shape tracking is done to check whether it moves \n beyond a specified limit or not\n \"\"\"\n if area > 400 and dist < 0.001:\n cv2.drawContours(frame, [approx], 0, (0, 255, 0), 2)\n (a,b,c,d) = cv2.boundingRect(cnt)\n cv2.rectangle(frame, (a,b), (a+c,b+d),(255,0,0),1)\n cv2.putText(frame, \"MATCHED\", (x, y), font, 1, (255, 0, 0))\n center = (b + b + d)/2\n if center > h//2:\n cv2.putText(frame, 'SHAPE CROSSED', (500, h-40),\n cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 2)\n \"\"\"\n if the writer object is true, it will write the entire tracking into \n a video file\n \"\"\"\n if writer is not None:\n writer.write(frame)\n \"\"\"\n displaying the frame\n \"\"\"\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n fps.update()\n \nfps.stop()\nprint(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()\n","sub_path":"obj_shape.py","file_name":"obj_shape.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"63689028","text":"\"\"\"\nProject Euler Problem 9\n=======================\n\nA Pythagorean triplet is a set of three natural numbers, a < b < c, for\nwhich,\n a^2 + b^2 = c^2\n\nFor example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\n\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000.\nFind the product abc.\n\"\"\"\n\n\ndef solution():\n for c in range(997, 2, -1):\n a, b = 1, 999 - c\n target = c ** 2\n while a < b:\n if a ** 2 + b ** 2 == target:\n return a * b * c\n a += 1\n b -= 1\n\n\nprint(solution())\n","sub_path":"009.py","file_name":"009.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"264579256","text":"from flask import Flask, render_template, request, redirect\nfrom flask_login import login_required\nfrom flask_migrate import Migrate\nfrom social_flask.routes import social_auth\nfrom social_flask_sqlalchemy.models import init_social\nimport models\n\n\ndef setup_app_routes(app):\n @app.before_request\n def force_ssl():\n if not app.config['SKIP_SSL'] and request.url.startswith('http://'):\n new = request.url.replace('http://', 'https://', 1)\n return redirect(new, code=301)\n\n # @app.route('/login')\n # def login():\n # return render_template('login.html')\n\n @app.route('/')\n @app.route('/')\n # @login_required\n def index(path=None):\n return 'coming soon'\n # return render_template('app.html')\n\n\ndef register_blueprints(app):\n app.register_blueprint(social_auth)\n\n\ndef init_libraries(app):\n models.db.init_app(app)\n Migrate(app, models.db)\n init_social(app, models.db.session)\n models.user.init_auth(app)\n\n\ndef create_app(config='config'):\n app = Flask(__name__)\n app.config.from_object(config)\n\n init_libraries(app)\n register_blueprints(app)\n setup_app_routes(app)\n\n return app\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"40306762","text":"\"\"\"\nGraph class and functionality\n\"\"\"\nclass Graph:\n routes = []\n \n \n def __init__(self, cities, routes, city_dict, convert):\n \"\"\"\n Constructor for the Graph\n \"\"\"\n self.city_info = cities\n self.route_info = routes\n self.city_dict = city_dict\n self.convert = convert\n \n for route in routes:\n port = route[\"ports\"]\n self.routes.append((port[0].encode(\"utf-8\"),port[1].encode(\"utf-8\")))\n \n def print_cities(self):\n \"\"\"\n List of all cities\n \"\"\"\n for city in self.city_info:\n print(city[\"name\"])\n \n def get_city_info(self, city_name):\n \"\"\"\n Information of a particular city\n \"\"\"\n flag =0\n for check in self.city_info:\n if(check[\"name\"].encode(\"utf-8\") == city_name):\n print(\"Name: \" + check[\"name\"].encode(\"utf-8\"))\n print(\"Code: \" + check[\"code\"].encode(\"utf-8\"))\n print(\"Country: \" + check[\"country\"].encode(\"utf-8\"))\n print(\"Continent: \" + check[\"continent\"].encode(\"utf-8\"))\n print(\"Timezone: {}\" ).format(check[\"timezone\"])\n print(\"Coordinates: {}\" ).format(check[\"coordinates\"])\n print(\"Population: {}\" ).format(check[\"population\"])\n print(\"Region: {}\" ).format(check[\"region\"])\n flag=1\n \n if(flag==0):\n print(\"Invalid Input\")\n \n def longest_flight(self):\n \"\"\"\n Distance and endpoints of the longest flight\n \"\"\"\n max =0\n max_route = None\n \n for route in self.route_info:\n if(route[\"distance\"]>max):\n max = route[\"distance\"]\n max_route = route[\"ports\"]\n \n print(\"Longest flight is:\")\n print(\"From: {} to {}\").format(max_route[0].encode(\"utf-8\"), max_route[1].encode(\"utf-8\"))\n print(\"Distance of: {}\").format(max)\n return max\n \n def shortest_flight(self):\n \"\"\"\n Distance and endpoints of the shortest flight\n \"\"\"\n min =100000000\n min_route = None\n \n for route in self.route_info:\n if(route[\"distance\"]max):\n max = city[\"population\"]\n max_city = city[\"name\"]\n \n print(\"Biggest City 
served by CSAir: \")\n print(\"{}, with a population of {}\").format(max_city, max)\n return max\n \n def smallest_city(self):\n \"\"\"\n Calculates city with the least population\n \"\"\"\n min = 100000000000000\n min_city = None\n \n for city in self.city_info:\n if(city[\"population\"]max):\n max = len((self.city_dict[key]).flights_in)\n \n \n for key in self.city_dict:\n if(len((self.city_dict[key]).flights_in)==max): \n hub_cities.append((self.city_dict[key]).name) \n \n \n print(\"{} are CSAir's main hub cities\").format(hub_cities)\n \n def visualize(self):\n \"\"\"\n URL to visualize entire network\n \"\"\"\n url = \"http://www.gcmap.com/mapui?P=\"\n \n for route in self.routes:\n url = url+route[0] + \"-\" + route[1]\n url = url + \",+\"\n \n url = url[:-2]\n print(url)\n \n \n ","sub_path":"CSAir2.0/Assignment2.0/src/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"153475443","text":"#!/usr/bin/env python3\n\n# Author: Jeffrey Grover\n# Purpose: Annotate the differentially expressed genes from DESeq2 with\n# start/stop and feature information from a gff3. This will also work with any\n# gene list where geneID is in the first column of a tsv/csv\n# Created: 10/2017\n\nimport csv\nfrom argparse import ArgumentParser\n\n# Function definitions go here\n\n\ndef parse_deseq2(input_file, file_dialect):\n features_dict = {}\n with open(input_file, 'r') as input_handle:\n if file_dialect == 'csv':\n results = csv.reader(input_handle)\n elif file_dialect == 'tsv':\n results = csv.reader(input_handle, delimiter='\\t')\n field_names = next(results)\n for row in results:\n feature_id = row[0]\n remaining_fields = row[1:]\n features_dict[feature_id] = [remaining_fields]\n return [features_dict, field_names]\n\n\ndef parse_gff(input_gff, gff_feature):\n gff_dict = {}\n with open(input_gff, 'r') as input_handle:\n gff3 = csv.reader(\n (row for row in input_handle if not row.startswith('#')),\n delimiter='\\t')\n for row in gff3:\n if row[2] == gff_feature:\n chromosome = row[0]\n feature = row[2]\n start = int(row[3])\n stop = int(row[4])\n strand = row[6]\n feature_id = int(''.join(\n filter(str.isdigit, str(row[8].split(';')[0])[3:])))\n if chromosome not in gff_dict:\n gff_dict[chromosome] = {}\n if feature_id not in gff_dict[chromosome]:\n gff_dict[chromosome][feature_id] = {}\n gff_dict[chromosome][feature_id] = [\n feature, start, stop, strand\n ]\n return gff_dict\n\n\ndef annotate_results(features_dict, gff3_dict, output_file, header):\n with open(output_file, 'w') as output_handle:\n output_file = csv.writer(output_handle)\n output_file.writerow(header)\n for feature in features_dict:\n feature_digits = int(''.join(filter(str.isdigit, feature)))\n for chromosome in gff3_dict:\n if feature_digits in gff3_dict[chromosome]:\n feature_type = gff3_dict[chromosome][feature_digits][0]\n start = 
gff3_dict[chromosome][feature_digits][1]\n stop = gff3_dict[chromosome][feature_digits][2]\n strand = gff3_dict[chromosome][feature_digits][3]\n deseq2_info = features_dict[feature][0]\n output_row = [\n chromosome, feature, feature_type, start, stop, strand\n ] + deseq2_info\n output_file.writerow(output_row)\n\n\n# Parse command line options\n\nparser = ArgumentParser(\n description='Annotate the differentially expressed genes from DESeq2 with '\n 'start/stop and feature information from a gff3. This will also work with '\n 'any gene/feature list where geneID is in the first column of a tsv/csv')\nparser.add_argument('--gff', help='Input gff3 file', metavar='File')\nparser.add_argument('--deseq', help='Input ShortStack Report', metavar='File')\nparser.add_argument('-d', '--deseq_dialect', help='tsv or csv')\nparser.add_argument('-f', '--feature', help='String matching a gff feature')\n\ngff_file = parser.parse_args().gff\ndeseq2_file = parser.parse_args().deseq\ndeseq2_dialect = parser.parse_args().deseq_dialect\nfeature = parser.parse_args().feature\noutput_file = deseq2_file.rsplit('.')[0] + '_annotated.csv'\n\n# Run the functions to create dictionaries\n\ndeseq2_result = parse_deseq2(deseq2_file, deseq2_dialect)\ndeseq2_dict = deseq2_result[0]\ndeseq2_header = deseq2_result[1]\ngff3_dict = parse_gff(gff_file, feature)\n\n# Create the header for the output file\n\noutput_header = [\n 'chromosome', 'feature_id', 'type', 'start', 'stop', 'strand'\n] + deseq2_result[1][1:]\n\n# Run the function to annotate the file\n\nannotate_results(deseq2_dict, gff3_dict, output_file, output_header)\n","sub_path":"deseq2_results_gff_annotate.py","file_name":"deseq2_results_gff_annotate.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"288134916","text":"import codecs\nfrom os import path\n\n\nfrom preprocessing.language_standardizer import eng\n\nPREPROCESS_SUFFIX = '_PRE'\n\n\nclass Preprocessor(object):\n \"\"\"\n Preprocessor\n \"\"\"\n def __init__(self, file_name, input_dir, output_dir, standardizer=eng):\n \"\"\"\n Parameters:\n file_name - name of input file\n \"\"\"\n self.standardizer = standardizer\n self.input_dir = input_dir\n self.file_name = file_name\n self.output_dir = output_dir\n\n def process(self):\n output_name = self.file_name + PREPROCESS_SUFFIX\n in_file = path.join(self.input_dir, self.file_name)\n out_file = path.join(self.output_dir, output_name)\n with codecs.open(in_file, encoding='UTF-8') as f:\n with codecs.open(out_file, mode='w+', encoding='UTF-8') as o:\n for line in f:\n processed_line = self.standardizer.standardize_line(line)\n o.write(processed_line)\n","sub_path":"preprocessing/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"156624508","text":"from BuildMinHeap import MinHeap\nfrom random import randint\n\n# 1. Create Heaps.\n# 2. Check each Heap.\n# 3. Run this whole code.\n\ndef create_heap():\n \"\"\" creates heap for testing \"\"\"\n\n no_tst = randint(100, 1000)\n heap_lst = []\n for i in range(no_tst):\n size = randint(0, 1000)\n arr = []\n for i in range(size):\n ele = randint(0, 100000000)\n arr.append(ele)\n obj = MinHeap(size, arr)\n obj.buildHeap()\n heap_lst.append(obj)\n \n return heap_lst\n\n\ndef check_min_heap(test_cases):\n \"\"\" Checks for a min heap \"\"\"\n\n result = [False for i in range(len(test_cases))]\n for idx, obj in enumerate(test_cases):\n for i in range(1, obj.heapSize):\n if obj.heapCheck == False:\n result[idx] = False\n break\n else:\n result[idx] = True\n\n return result\n\n\ndef final_check(result):\n \"\"\" Displays the final result \"\"\"\n \n ret_val = True\n for flag in result:\n if flag == False:\n return False\n return True\n\n\ndef main():\n \"\"\" The main function \"\"\"\n\n test_cases = create_heap()\n result = check_min_heap(test_cases)\n pass_res = final_check(result)\n if final_check:\n print('This passes the test cases.')\n else:\n print('The code does not work properly')\n\nif __name__ == '__main__':\n main()","sub_path":"Heaps/001MinHeapTest.py","file_name":"001MinHeapTest.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"86014931","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 14:51:55 2013\n\n@author: cimatori\n\"\"\"\n\n# 1. General\n# Data path\nfrom socket import gethostname\nif gethostname()=='sboron2':\n BaseDir = '/run/media/sambarluc/Cimatoribus1/Analyses/LIS131/'\nelse:\n BaseDir = '/media/scratch/Analyses/LIS131/'\n#Output directory\nOutDir = BaseDir+'TemperatureDissipation/'\n# Data dir\nDetailFile = BaseDir+'ProcessData/results/Output_data_stex.chn'\n\n# Are we computing slopes of NoFilter or Taylor data?\ndata_type = 'Taylor'\n\n# Start-end dates\nStart = 104.\nEnd = 224.\n\n# Range of increments in the inertial range for\n# tide up\nif data_type=='NoFilter':\n limsup = ([8,1.e3], # set 1-36\n [8,1.e3], # set 37-72\n [8,1.e3], # set 73-108\n [8,1.e3]) # set 109-144\n limsdown=([8,1.e3], # set 1-36\n [8,1.e3], # set 37-72\n [8,1.e3], # set 73-108\n [8,1.e3]) # set 109-144\nelif data_type=='Taylor':\n limsup = ([2.0,60], # set 1-72\n [2.5,60]) # set 73-144\n limsdown=([2,100], # set 1-72\n [2,18]) # set 73-144\n limslarge = 300.\nelif data_type=='TaylorHi': # Taylor high passed\n limsup = ([1,100], # set 1-36\n [1,100], # set 37-72\n [1,100], # set 73-108\n [1,100]) # set 109-144\n limsdown=([1,100], # set 1-36\n [1,100], # set 37-72\n [1,100], # set 73-108\n [1,100]) # set 109-144\ndTs = (limsup, limsdown)\n\n# Which thermistor sets should be computed?\nsets = ['73-108','109-144']\n\n# Order of moments to compute\nMoms = range(1,11)\nnM = len(Moms)\n\n# Which sets to consider\nsetNames = ('AB','CD')\nnSets = len(setNames)\n\nsetLabels = dict(zip(setNames,('A','B','C','D')))\nsetThms = dict(zip(setNames,(range(0,36),range(36,72),range(72,108),range(108,144))))\n\n# Style for point plots\npStyle = dict(ms=8, alpha=0.5, ls='none', mew=1.2)\npStyle2 = dict(ms=4, alpha=0.8, ls='none')\n# Style for fill_between plots\nfStyle = dict(alpha=0.2, lw=1.)\n\n# Markers for tidal phases\nmarkT = ('^','o')\nnamT = ('up', 'down')\ncolorsT = ('b','r')\n\n# 
Number of tidal phases\nnT = len(namT)\n\n# Step of Taylor data, in case we are using it\nStepX = 0.2\n\n# Colors for plotting\nfrom matplotlib.pyplot import cm\ncolorsSet = cm.get_cmap('jet',nSets)(range(nSets))\nmarkSet = ('o','*','^','s')\n\n# Label symbols\nif data_type=='NoFilter':\n delta = '$\\\\Delta \\\\theta_\\\\tau$'\n spacing = '$\\\\tau$ $\\\\mathrm{[s]}$'\n axis = '\\\\tau'\nelif data_type in ('Taylor', 'TaylorHi'):\n delta = '$\\\\Delta \\\\theta_x$'\n spacing = '$r$ $\\\\mathrm{[m]}$'\n axis = 'r'\norder = '$q$'\nexponent= '$\\\\zeta_q$'\n\n# Style for tidal phases\nmarkT = ('^','o')\nnamT = ('up', 'down')\ncolorsT = ('b','r')\n\n# Number of tidal phases\nnT = len(namT)\n\n# File names\nif data_type=='NoFilter':\n files = [OutDir+'results/Moments_{}_NoFilter_day_{}_{}.npz' \\\n .format(sn,Start,End) for sn in setNames]\n fig_file = OutDir+'figures/MomentSlopes_NoFilter_day_{}_{}.pdf' \\\n .format(Start,End)\nelif data_type=='Taylor':\n files = [OutDir+'results/Moments_{}_Taylor_StepX_{}_day_{}_{}.npz' \\\n .format(sn,StepX,Start,End) for sn in setNames]\n fig_file = OutDir+'figures/Taylor_StepX_{}/MomentSlopes_day_{}_{}.tif' \\\n .format(StepX,Start,End)\n fig_file_L = OutDir+'figures/Taylor_StepX_{}/MomentSlopesLarge_day_{}_{}.png' \\\n .format(StepX,Start,End)\nelif data_type=='TaylorHi':\n files = [OutDir+'results/Moments_{}_TaylorHighPass_StepX_{}_day_{}_{}.npz' \\\n .format(sn,StepX,Start,End) for sn in setNames]\n fig_file = OutDir+'figures/Taylor_StepX_{}/MomentSlopes_HighPass_day_{}_{}.pdf' \\\n .format(StepX,Start,End)\n","sub_path":"LIS131/TemperatureDissipation/ConfigMomentSlopes.py","file_name":"ConfigMomentSlopes.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"192431444","text":"from .models import MoonDay, Ritual\nfrom datetime import *\n\n\n\ndef clear_year(year):\n return MoonDay.objects.filter(year=year).delete()\n\ndef fill_default_calendar(year, save=False):\n days = MoonDay.objects.filter(year=year)\n fd = datetime(year, 1, 1)\n ld = datetime(year+1,1,1)\n dd = ld-fd\n \n sdn=0\n sdmn=6 \n \n for i in range(sdn, dd.days):\n nmd = MoonDay(year=year, day_no=i, moon_day_no=sdmn, morning_hural=Ritual.objects.get(pk=13), day_hural=Ritual.objects.get(pk=14))\n if nmd.date().weekday() == 4:\n nmd.day_hural = Ritual.objects.get(pk=16) # юроол\n elif nmd.date().weekday() == 5:\n nmd.day_hural = Ritual.objects.get(pk=17) # банзарагша\n elif nmd.date().weekday() == 6:\n nmd.morning_hural = Ritual.objects.get(pk=15) # намсарай\n nmd.day_hural = Ritual.objects.get(pk=18) # алтэн гэрэл\n tl = [(25, 31),(23, 29),(25, 31),(24, 30),(25, 31),(24, 30),(25, 31),(25, 31), (24, 30),(25, 31),(24, 30),(25, 31)] if dd.days == 366 else [(25, 31),(22, 28),(25, 31),(24, 30),(25, 31),(24, 30),(25, 31),(25, 31), (24, 30),(25, 31),(24, 30),(25, 31)] \n if nmd.date().day in range(tl[nmd.date().month-1][0], tl[nmd.date().month-1][1]):\n nmd.day_hural = Ritual.objects.get(pk=19) # сундуй\n \n sdmn = sdmn + 1 if sdmn < 29 else 1\n \n if save: \n nmd.save()\n print (str(nmd))\n\n return days","sub_path":"kalachakra/saraswati/cal_helpers.py","file_name":"cal_helpers.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"186688900","text":"import unittest\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\n\nfrom avalanche.models import SimpleMLP\nfrom avalanche.training.plugins import ExperienceBalancedStoragePolicy, \\\n ClassBalancedStoragePolicy, ReplayPlugin\nfrom avalanche.training.strategies import Naive\n\nfrom tests.unit_tests_utils import get_fast_scenario\n\n\nclass ReplayTest(unittest.TestCase):\n def test_replay_balanced_memory(self):\n mem_size = 25\n policies = [None,\n ExperienceBalancedStoragePolicy({}, mem_size=mem_size),\n ClassBalancedStoragePolicy({}, mem_size=mem_size)]\n for policy in policies:\n self._test_replay_balanced_memory(policy, mem_size)\n\n def _test_replay_balanced_memory(self, storage_policy, mem_size):\n scenario = get_fast_scenario(use_task_labels=True)\n model = SimpleMLP(input_size=6, hidden_size=10)\n replayPlugin = ReplayPlugin(mem_size=mem_size,\n storage_policy=storage_policy)\n cl_strategy = Naive(\n model,\n SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.001),\n CrossEntropyLoss(), train_mb_size=32, train_epochs=1,\n eval_mb_size=100, plugins=[replayPlugin]\n )\n\n n_seen_data = 0\n for step in scenario.train_stream:\n n_seen_data += len(step.dataset)\n mem_fill = min(mem_size, n_seen_data)\n cl_strategy.train(step)\n ext_mem = replayPlugin.ext_mem\n lengths = []\n for task_id in ext_mem.keys():\n lengths.append(len(ext_mem[task_id]))\n self.assertEqual(sum(lengths), mem_fill) # Always fully filled\n\n def test_balancing(self):\n p1 = ExperienceBalancedStoragePolicy({}, 100, adaptive_size=True)\n p2 = ClassBalancedStoragePolicy({}, 100, adaptive_size=True)\n\n for policy in [p1, p2]:\n self.assert_balancing(policy)\n\n def assert_balancing(self, policy):\n ext_mem = policy.ext_mem\n scenario = get_fast_scenario(use_task_labels=True)\n replay = ReplayPlugin(mem_size=100, storage_policy=policy)\n model = SimpleMLP(num_classes=scenario.n_classes)\n\n # CREATE THE STRATEGY INSTANCE (NAIVE)\n 
cl_strategy = Naive(model,\n SGD(model.parameters(), lr=0.001),\n CrossEntropyLoss(), train_mb_size=100,\n train_epochs=0,\n eval_mb_size=100, plugins=[replay], evaluator=None)\n\n for exp in scenario.train_stream:\n cl_strategy.train(exp)\n print(list(ext_mem.keys()), [len(el) for el in ext_mem.values()])\n\n # buffer size should equal self.mem_size if data is large enough\n len_tot = sum([len(el) for el in ext_mem.values()])\n assert len_tot == policy.mem_size\n","sub_path":"tests/training/test_replay.py","file_name":"test_replay.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"545888119","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef remove_keymap_conflicts(new_keys_set):\n for prop in plt.rcParams:\n if prop.startswith('keymap.'):\n keys = plt.rcParams[prop]\n remove_list = set(keys) & new_keys_set\n for key in remove_list:\n keys.remove(key)\n\ndef multi_slice_viewer(volume, view='axial',\n overlay_1=None, overlay_1_cmap='RdYlGn', overlay_1_alpha=0.5, overlay_1_thres=0.5,\n overlay_2=None, overlay_2_cmap='Wistia', overlay_2_alpha=0.5, overlay_2_thres=0.5,\n title=''):\n\n assert view in ['axial', 'sagittal', 'coronal']\n remove_keymap_conflicts({'j', 'k'})\n\n # change view\n rotation = None\n if view == 'axial':\n rotation = lambda img : img.copy()\n elif view == 'sagittal':\n rotation = lambda img : np.rot90(np.rot90(img.copy(), axes=(0,2)), axes=(1,2))\n elif view == 'coronal':\n rotation = lambda img : np.rot90(img.copy(), axes=(1,0))\n\n fig, ax = plt.subplots()\n plt.xticks([], [])\n plt.yticks([], [])\n ax.volume = rotation(volume)\n ax.index = ax.volume.shape[0] // 2\n ax.imshow(ax.volume[ax.index], cmap='gray', vmin=np.min(ax.volume), vmax=np.max(ax.volume), interpolation='bilinear')\n if overlay_1 is not None:\n ax.volume_2 = np.ma.masked_where(rotation(overlay_1) < overlay_1_thres, rotation(overlay_1))\n ax.imshow(ax.volume_2[ax.index], vmin=np.min(ax.volume_2) , vmax=np.max(ax.volume_2), cmap=overlay_1_cmap, alpha=overlay_1_alpha)\n if overlay_2 is not None:\n ax.volume_3 = np.ma.masked_where(rotation(overlay_2) < overlay_2_thres, rotation(overlay_2))\n ax.imshow(ax.volume_3[ax.index], vmin=np.min(ax.volume_3) , vmax=np.max(ax.volume_3), cmap=overlay_2_cmap, alpha=overlay_2_alpha)\n plt.title(title)\n plt.xlabel(ax.index)\n fig.canvas.mpl_connect('key_press_event', process_key)\n fig.canvas.mpl_connect('scroll_event', process_scroll)\n\ndef process_key(event):\n fig = event.canvas.figure\n ax = fig.axes[0]\n if event.key == 'j':\n previous_slice(ax)\n elif event.key == 'k':\n next_slice(ax)\n 
fig.canvas.draw()\n\ndef process_scroll(event):\n fig = event.canvas.figure\n ax = fig.axes[0]\n if event.button == 'down':\n previous_slice(ax)\n elif event.button == 'up':\n next_slice(ax)\n fig.canvas.draw()\n\ndef previous_slice(ax):\n volume = ax.volume\n ax.index = (ax.index - 1) % volume.shape[0] # wrap around using %\n plt.xlabel(ax.index)\n ax.images[0].set_array(volume[ax.index])\n if ax.volume_2 is not None:\n volume_2 = ax.volume_2\n ax.images[1].set_array(volume_2[ax.index])\n if ax.volume_3 is not None:\n volume_3 = ax.volume_3\n ax.images[2].set_array(volume_3[ax.index])\n\ndef next_slice(ax):\n volume = ax.volume\n ax.index = (ax.index + 1) % volume.shape[0]\n plt.xlabel(ax.index)\n ax.images[0].set_array(volume[ax.index])\n if ax.volume_2 is not None:\n volume_2 = ax.volume_2\n ax.images[1].set_array(volume_2[ax.index])\n if ax.volume_3 is not None:\n volume_3 = ax.volume_3\n ax.images[2].set_array(volume_3[ax.index])\n","sub_path":"multi_slice_viewer.py","file_name":"multi_slice_viewer.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"294404046","text":"import time\n\ndef insertion_sort(a_list):\n \"\"\"Sort a_list in ascending order. \n \n Use insertion sort with linear search.\n \"\"\"\n for current_position in range(1, len(a_list)):\n current_value = a_list[current_position]\n position = current_position\n while position > 0 and a_list[position - 1] > current_value:\n a_list[position] = a_list[position - 1]\n position = position - 1\n a_list[position] = current_value\n\n# the length of the input list\nsize = 32000 # CHANGE THIS\n\n# Create a list of integers in descending order (worst-case scenario)\ntest_list = []\nfor index in range(size):\n test_list.append(size - index)\n# Time the execution of the sorting algorithm\nstart = time.time()\ninsertion_sort(test_list)\nelapsed = (time.time() - start) * 1000\nprint(size, 'integers sorted in', int(elapsed), 'milliseconds')","sub_path":"insertion_sort/python/with_binary_search/normal_insertion_sort.py","file_name":"normal_insertion_sort.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"152242500","text":"import sqlite3\n\ncon = sqlite3.connect('sqlite.db')\n\ncursor = con.cursor()\n\ncommand = \"\"\"SELECT playlist.id,music.path FROM music JOIN playlist\nON music.id_playlist = playlist.id\nORDER BY playlist.id\"\"\"\n\ncursor.execute(command)\nresult = cursor.fetchall()\nprint(result)","sub_path":"Requetes/Requtes SQL python/recuperer les musique dune playlist.py","file_name":"recuperer les musique dune playlist.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"175444705","text":"import pygame\nfrom pygame.locals import *\nfrom functions.checkpoint_functions import get_highest_checkpoint_reached\n\n\ndef death(screen, player, camera, rooms):\n pygame.image.save(screen, \"data/images/temp.jpg\")\n screen_shot = pygame.image.load(\"data/images/temp.jpg\")\n\n black = (0, 0, 0, 255)\n\n black_screen = pygame.Surface((camera.screen_width, camera.screen_height))\n black_screen.set_alpha(0)\n current_a_value = 0\n\n exit_counter = 0\n\n # \"revives\" the player\n player.health = 3\n player.invulnerability_counter = 0\n player.speed_x = 0\n player.speed_y = 0\n player.direction_x = 0\n\n # move the player to the highest checkpoint\n highest_checkpoint = get_highest_checkpoint_reached(rooms)\n player.x, player.y = highest_checkpoint.x, highest_checkpoint.y\n\n in_animation = True\n while in_animation:\n for event in pygame.event.get():\n if event.type == QUIT:\n in_animation = False\n pygame.quit()\n\n if event.type == KEYDOWN:\n in_animation = False\n\n black_screen.fill(black)\n screen.blit(screen_shot, (0, 0))\n screen.blit(black_screen, (0, 0))\n\n current_a_value += 10\n if current_a_value >= 255:\n current_a_value = 255\n exit_counter += 1\n\n black_screen.set_alpha(current_a_value)\n if exit_counter == 30:\n break\n\n pygame.display.update()\n\n\n\n\n\n\n","sub_path":"functions/death.py","file_name":"death.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"396759855","text":"import re\nx = []\nsum = a = az= 0\nflag = 0\nq = input()\nfor i in range(0, 12):\n x.append([])\n if az == 0:\n az = input()\n try:\n int(flag)\n for j in range(0, 12):\n y = float(az)\n x[i].insert(j, y)\n az = input()\n\n except:\n flag = 'a'\n lst = list(int(k) for k in re.findall(r'(-?\\d+)', az))\n for j in lst:\n x[i].append(j)\n az = 0\n\n\n\"\"\" \n for i in x:\n print(i)\n\"\"\"\nc = 11\nl =0\nfor i in range(0, 12):\n for j in range(0, 12):\n if c < j:\n l += 1\n sum += x[i][j]\n c -= 1\n\nif q == 'S':\n print(\"{0:.1f}\".format(round(sum, 1)))\nelif q == 'M':\n print(\"{0:.1f}\".format(round(sum/l, 1)))","sub_path":"1186 - Below the Secundary Diagonal.py","file_name":"1186 - Below the Secundary Diagonal.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"209426402","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport io\nimport os\nimport json\nimport time\nimport zlib\nimport base64\nimport shutil\nimport inspect\n\nfrom collections import OrderedDict, deque\nfrom datetime import date, datetime\nfrom base64 import b64encode, b64decode\n\n\ntry:\n from dateutil.parser import parse\nexcept:\n pass\n\ntry:\n import numpy as np\nexcept:\n pass\n\ntry:\n import pandas as pd\nexcept:\n pass\n\ntry:\n from . import compresslib\n from .comments import strip_comments\n from .warning import logger, WARN_MSG, prt_console\n from .util import write, read\n from .pkg.six import PY2, PY3, add_metaclass, string_types, iteritems\nexcept:\n from superjson import compresslib\n from superjson.comments import strip_comments\n from superjson.warning import logger, WARN_MSG, prt_console\n from superjson.util import write, read\n from superjson.pkg.six import PY2, PY3, add_metaclass, string_types, iteritems\n\n\ndef get_class_name(obj):\n \"\"\"Get class name in dot separete notation\n\n >>> from datetime import datetime\n >>> obj = datetime.datetime(2000, 1, 1)\n >>> get_class_name(obj) -> \"datetime.datetime\"\n\n >>> from collections import deque\n >>> obj = deque([1, 2, 3])\n >>> get_class_name(obj) -> \"collections.deque\"\n \"\"\"\n return obj.__class__.__module__ + \".\" + obj.__class__.__name__\n\n\ndef get_class_name_from_dumper_loader_method(func):\n \"\"\"Get default value of ``class_name`` argument.\n\n Because the third argument of dumper, loader method must be the class name.\n\n \"\"\"\n return inspect.getargspec(func).defaults[0]\n\n\ndef is_dumper_method(func):\n \"\"\"Test if it is a dumper method.\n \"\"\"\n if inspect.getargspec(func).args == [\"self\", \"obj\", \"class_name\"]:\n return True\n else:\n return False\n\n\ndef is_loader_method(func):\n \"\"\"Test if it is a loader method.\n \"\"\"\n if inspect.getargspec(func).args == [\"self\", \"dct\", \"class_name\"]:\n return True\n else:\n 
return False\n\n\nclass Meta(type):\n\n def __new__(cls, name, bases, attrs):\n klass = super(Meta, cls).__new__(cls, name, bases, attrs)\n\n _dumpers = dict()\n _loaders = dict()\n\n for base in inspect.getmro(klass):\n for attr, value in base.__dict__.items():\n dumper_warning_message = WARN_MSG.format(\n attr=attr,\n method_type=\"dumper\",\n obj_or_dct=\"obj\",\n dump_or_load=\"dump\",\n )\n\n loader_warning_message = WARN_MSG.format(\n attr=attr,\n method_type=\"loader\",\n obj_or_dct=\"dct\",\n dump_or_load=\"load\",\n )\n\n # find dumper method,\n if attr.startswith(\"dump_\"):\n try:\n if is_dumper_method(value):\n class_name = get_class_name_from_dumper_loader_method(\n value)\n _dumpers[class_name] = value\n else:\n logger.warning(dumper_warning_message)\n except TypeError:\n logger.warning(dumper_warning_message)\n\n # find loader method\n if attr.startswith(\"load_\"):\n try:\n if is_loader_method(value):\n class_name = get_class_name_from_dumper_loader_method(\n value)\n _loaders[class_name] = value\n else:\n logger.warning(loader_warning_message)\n except TypeError:\n logger.warning(loader_warning_message)\n\n klass._dumpers = _dumpers\n klass._loaders = _loaders\n return klass\n\n\nif PY2:\n bytes_class_name = \"builtins.str\"\n set_class_name = \"__builtin__.set\"\nelif PY3:\n bytes_class_name = \"builtins.bytes\"\n set_class_name = \"builtins.set\"\n\n\ndef is_compressed_json_file(abspath):\n \"\"\"Test a file is a valid json file.\n\n - *.json: uncompressed, utf-8 encode json file\n - *.js: uncompressed, utf-8 encode json file\n - *.gz: compressed, utf-8 encode json file\n \"\"\"\n abspath = abspath.lower()\n fname, ext = os.path.splitext(abspath)\n if ext in [\".json\", \".js\"]:\n is_compressed = False\n elif ext == \".gz\":\n is_compressed = True\n elif ext == \".tmp\":\n return is_compressed_json_file(fname)\n else:\n raise ValueError(\n \"'%s' is not a valid json file. 
\"\n \"extension has to be '.json' or '.js' for uncompressed, '.gz' \"\n \"for compressed.\" % abspath)\n return is_compressed\n\n\n@add_metaclass(Meta)\nclass SuperJson(object):\n \"\"\"A extensable json encoder/decoder. You can easily custom converter for \n any types.\n \"\"\"\n _dumpers = dict()\n _loaders = dict()\n\n def _dump(self, obj):\n \"\"\"Dump single object to json serializable value.\n \"\"\"\n class_name = get_class_name(obj)\n if class_name in self._dumpers:\n return self._dumpers[class_name](self, obj)\n raise TypeError(\"%r is not JSON serializable\" % obj)\n\n def _json_convert(self, obj):\n \"\"\"Recursive helper method that converts dict types to standard library\n json serializable types, so they can be converted into json.\n \"\"\"\n # OrderedDict\n if isinstance(obj, OrderedDict):\n try:\n return self._dump(obj)\n except TypeError:\n return {k: self._json_convert(v) for k, v in iteritems(obj)}\n\n # nested dict\n elif isinstance(obj, dict):\n return {k: self._json_convert(v) for k, v in iteritems(obj)}\n\n # list or tuple\n elif isinstance(obj, (list, tuple)):\n return list((self._json_convert(v) for v in obj))\n\n # single object\n try:\n return self._dump(obj)\n except TypeError:\n return obj\n\n def _object_hook1(self, dct):\n \"\"\"A function can convert dict data into object. \n\n it's an O(1) implementation. 
\n \"\"\"\n # {\"$class_name\": obj_data}\n if len(dct) == 1:\n for key, value in iteritems(dct):\n class_name = key[1:]\n if class_name in self._loaders:\n return self._loaders[class_name](self, dct)\n return dct\n return dct\n\n def _object_hook2(self, dct):\n \"\"\"Another object hook implementation.\n\n it's an O(N) implementation.\n \"\"\"\n for class_name, loader in self._loaders.items():\n if (\"$\" + class_name) in dct:\n return loader(self, dct)\n return dct\n\n def dumps(self, obj,\n indent=None,\n sort_keys=None,\n pretty=False,\n float_precision=None,\n ensure_ascii=True,\n compress=False,\n **kwargs):\n \"\"\"Dump any object into json string.\n\n :param pretty: if True, dump json into pretty indent and sorted key\n format.\n :type pretty: bool\n\n :param float_precision: default ``None``, limit floats to \n N-decimal points. \n :type float_precision: integer\n\n :param compress: default ``False. If True, then compress encoded string.\n :type compress: bool\n \"\"\"\n if pretty:\n indent = 4\n sort_keys = True\n\n if float_precision is None:\n json.encoder.FLOAT_REPR = repr\n else:\n json.encoder.FLOAT_REPR = lambda x: format(\n x, \".%sf\" % float_precision)\n\n s = json.dumps(\n self._json_convert(obj),\n indent=indent,\n sort_keys=sort_keys,\n ensure_ascii=ensure_ascii,\n **kwargs\n )\n\n if compress:\n s = compresslib.compress(s, return_type=\"str\")\n\n return s\n\n def loads(self, s,\n object_hook=None,\n decompress=False,\n ignore_comments=False,\n **kwargs):\n \"\"\"load object from json encoded string.\n\n :param decompress: default ``False. If True, then decompress string.\n :type decompress: bool\n\n :param ignore_comments: default ``False. 
If True, then ignore comments.\n :type ignore_comments: bool\n \"\"\"\n if decompress:\n s = compresslib.decompress(s, return_type=\"str\")\n\n if ignore_comments:\n s = strip_comments(s)\n\n if object_hook is None:\n object_hook = self._object_hook1\n\n if \"object_pairs_hook\" in kwargs:\n del kwargs[\"object_pairs_hook\"]\n\n obj = json.loads(\n s,\n object_hook=object_hook,\n object_pairs_hook=None,\n **kwargs\n )\n\n return obj\n\n def dump(self, obj,\n abspath,\n indent=None,\n sort_keys=None,\n pretty=False,\n float_precision=None,\n ensure_ascii=True,\n overwrite=False,\n verbose=True,\n **kwargs):\n \"\"\"Dump any object into file.\n\n :param abspath: if ``*.json, *.js** then do regular dump. if ``*.gz``,\n then perform compression.\n :type abspath: str\n\n :param pretty: if True, dump json into pretty indent and sorted key\n format.\n :type pretty: bool\n\n :param float_precision: default ``None``, limit floats to \n N-decimal points. \n :type float_precision: integer\n\n :param overwrite: default ``False``, If ``True``, when you dump to \n existing file, it silently overwrite it. If ``False``, an alert \n message is shown. Default setting ``False`` is to prevent overwrite \n file by mistake.\n :type overwrite: boolean\n\n :param verbose: default True, help-message-display trigger.\n :type verbose: boolean\n \"\"\"\n prt_console(\"\\nDump to '%s' ...\" % abspath, verbose)\n\n is_compressed = is_compressed_json_file(abspath)\n\n if os.path.exists(abspath):\n if not overwrite:\n prt_console(\n \" Stop! 
File exists and overwrite is not allowed\",\n verbose,\n )\n return\n\n st = time.clock()\n\n s = self.dumps(\n obj,\n indent=indent,\n sort_keys=sort_keys,\n pretty=pretty,\n float_precision=float_precision,\n ensure_ascii=ensure_ascii,\n # use uncompressed string, and directly write to file\n compress=False,\n **kwargs\n )\n\n with open(abspath, \"wb\") as f:\n if is_compressed:\n f.write(compresslib.compress(s, return_type=\"bytes\"))\n else:\n f.write(s.encode(\"utf-8\"))\n\n prt_console(\" Complete! Elapse %.6f sec.\" % (time.clock() - st),\n verbose)\n return s\n\n def safe_dump(self, obj,\n abspath,\n indent=None,\n sort_keys=None,\n pretty=False,\n float_precision=None,\n ensure_ascii=True,\n verbose=True,\n **kwargs):\n \"\"\"A stable version of :func:`SuperJson.dump`, this method will \n silently overwrite existing file.\n\n There's a issue with :func:`SuperJson.dump`: If your program is \n interrupted while writing, you got an incomplete file, and you also \n lose the original file. So this method write json to a temporary file \n first, then rename to what you expect, and silently overwrite old one. \n This way can guarantee atomic write operation.\n\n **中文文档**\n\n 在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果使用了\n 覆盖式写入, 则我们即没有得到新文件, 同时也丢失了原文件。所以为了保证\n 写操作的原子性(要么全部完成, 要么全部都不完成), 更好的方法是: 首先将\n 文件写入一个临时文件中, 完成后再讲文件重命名, 覆盖旧文件。这样即使中途\n 程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会影响原文件。\n \"\"\"\n abspath_temp = \"%s.tmp\" % abspath\n s = self.dump(\n obj,\n abspath_temp,\n indent=indent,\n sort_keys=sort_keys,\n pretty=pretty,\n float_precision=float_precision,\n ensure_ascii=ensure_ascii,\n overwrite=True,\n verbose=verbose,\n **kwargs\n )\n shutil.move(abspath_temp, abspath)\n return s\n\n def load(self, abspath,\n object_hook=None,\n ignore_comments=False,\n verbose=True,\n **kwargs):\n \"\"\"load object from json file.\n\n :param abspath: if ``*.json, *.js** then do regular dump. 
if ``*.gz``,\n then perform decompression.\n :type abspath: str\n\n :param ignore_comments: default ``False. If True, then ignore comments.\n :type ignore_comments: bool\n\n :param verbose: default True, help-message-display trigger.\n :type verbose: boolean\n \"\"\"\n prt_console(\"\\nLoad from '%s' ...\" % abspath, verbose)\n\n is_compressed = is_compressed_json_file(abspath)\n\n if not os.path.exists(abspath):\n raise ValueError(\"'%s' doesn't exist.\" % abspath)\n raise\n\n st = time.clock()\n\n with open(abspath, \"rb\") as f:\n if is_compressed:\n s = compresslib.decompress(f.read(), return_type=\"str\")\n else:\n s = f.read().decode(\"utf-8\")\n\n obj = self.loads(\n s,\n object_hook=object_hook,\n decompress=False,\n ignore_comments=ignore_comments,\n )\n\n prt_console(\" Complete! Elapse %.6f sec.\" % (time.clock() - st),\n verbose)\n\n return obj\n\n def dump_bytes(self, obj, class_name=bytes_class_name):\n return {\"$\" + class_name: b64encode(obj).decode()}\n\n def load_bytes(self, dct, class_name=bytes_class_name):\n return b64decode(dct[\"$\" + class_name].encode())\n\n def dump_datetime(self, obj, class_name=\"datetime.datetime\"):\n return {\"$\" + class_name: obj.isoformat()}\n\n def load_datetime(self, dct, class_name=\"datetime.datetime\"):\n return parse(dct[\"$\" + class_name])\n\n def dump_date(self, obj, class_name=\"datetime.date\"):\n return {\"$\" + class_name: str(obj)}\n\n def load_date(self, dct, class_name=\"datetime.date\"):\n return datetime.strptime(dct[\"$\" + class_name], \"%Y-%m-%d\").date()\n\n def dump_set(self, obj, class_name=set_class_name):\n return {\"$\" + class_name: [self._json_convert(item) for item in obj]}\n\n def load_set(self, dct, class_name=set_class_name):\n return set(dct[\"$\" + class_name])\n\n def dump_deque(self, obj, class_name=\"collections.deque\"):\n return {\"$\" + class_name: [self._json_convert(item) for item in obj]}\n\n def load_deque(self, dct, class_name=\"collections.deque\"):\n return 
deque(dct[\"$\" + class_name])\n\n def dump_OrderedDict(self, obj, class_name=\"collections.OrderedDict\"):\n return {\n \"$\" + class_name: [\n (key, self._json_convert(value)) for key, value in iteritems(obj)\n ]\n }\n\n def load_OrderedDict(self, dct, class_name=\"collections.OrderedDict\"):\n return OrderedDict(dct[\"$\" + class_name])\n\n def dump_nparray(self, obj, class_name=\"numpy.ndarray\"):\n return {\"$\" + class_name: self._json_convert(obj.tolist())}\n\n def load_nparray(self, dct, class_name=\"numpy.ndarray\"):\n return np.array(dct[\"$\" + class_name])\n\n\nsuperjson = SuperJson()\n\n\nif __name__ == \"__main__\":\n from pprint import pprint\n\n def test_common():\n data = {\n \"int\": 1,\n \"str\": \"Hello\",\n \"bytes\": \"Hello\".encode(\"utf-8\"),\n \"date\": date.today(),\n \"datetime\": datetime.now(),\n \"set\": set([\n datetime(2000, 1, 1),\n datetime(2000, 1, 2),\n ]),\n \"deque\": deque([\n deque([1, 2]),\n deque([3, 4]),\n ]),\n \"ordereddict\": OrderedDict([\n (\"b\", OrderedDict([(\"b\", 1), (\"a\", 2)])),\n (\"a\", OrderedDict([(\"b\", 1), (\"a\", 2)])),\n ]),\n }\n s = superjson.dumps(data, indent=4)\n# print(s)\n data1 = superjson.loads(s)\n# pprint(data1)\n assert data == data1\n\n s = superjson.dumps(data, compress=True)\n# print(s)\n data1 = superjson.loads(s, decompress=True)\n# pprint(data1)\n assert data == data1\n\n test_common()\n\n def test_numpy():\n data = {\n \"ndarray_int\": np.array([[1, 2], [3, 4]]),\n \"ndarray_float\": np.array([[1.1, 2.2], [3.3, 4.4]]),\n \"ndarray_str\": np.array([[\"a\", \"b\"], [\"c\", \"d\"]]),\n \"ndarray_datetime\": np.array(\n [datetime(2000, 1, 1), datetime(2010, 1, 1)]\n ),\n }\n s = superjson.dumps(data, indent=4)\n# print(s)\n data1 = superjson.loads(s)\n# pprint(data1)\n\n for key in data:\n assert np.array_equal(data[key], data1[key])\n\n test_numpy()\n\n def test_pandas():\n \"\"\"\n\n .. 
note:: Not supported yet!\n \"\"\"\n data = {\n \"series\": pd.Series([(\"a\", datetime(2000, 1, 1)),\n (\"b\", datetime(2010, 1, 1))]),\n }\n# s = superjson.dumps(data, indent=4)\n# print(s)\n# data1 = superjson.loads(s)\n# pprint(data1)\n\n# test_pandas()\n\n def test_extend():\n \"\"\"Test for extend SuperJson for arbitrary custom types.\n \"\"\"\n from sfm.nameddict import Base as Address\n\n class User(object):\n\n def __init__(self, id=None, name=None):\n self.id = id\n self.name = name\n\n def __repr__(self):\n return \"User(id=%r, name=%r)\" % (self.id, self.name)\n\n def __eq__(self, other):\n return self.id == other.id and self.name == other.name\n\n Address_class_name = \"sfm.nameddict.Base\"\n assert get_class_name(Address()) == \"sfm.nameddict.Base\"\n\n User_class_name = \"__main__.User\"\n assert get_class_name(User()) == \"__main__.User\"\n\n class MySuperJson(SuperJson):\n\n def dump_User(self, obj, class_name=\"__main__.User\"):\n key = \"$\" + class_name\n return {key: {\"id\": obj.id, \"name\": obj.name}}\n\n def load_User(self, dct, class_name=\"__main__.User\"):\n key = \"$\" + class_name\n return User(**dct[key])\n\n def dump_Address(self, obj, class_name=\"sfm.nameddict.Base\"):\n key = \"$\" + class_name\n return {key: {\"street\": obj.street,\n \"city\": obj.city,\n \"state\": obj.state,\n \"zipcode\": obj.zipcode}}\n\n def load_Address(self, dct, class_name=\"sfm.nameddict.Base\"):\n key = \"$\" + class_name\n return Address(**dct[key])\n\n js = MySuperJson()\n data = {\n \"int\": 1,\n \"str\": \"Hello\",\n \"user\": User(id=1, name=\"Alice\"),\n \"address\": Address(\n street=\"123 Main St\", city=\"New York\", state=\"NY\", zipcode=\"10001\",\n ),\n }\n s = js.dumps(data, indent=4)\n# print(s)\n\n data1 = js.loads(s)\n# print(data1)\n\n assert data == data1\n\n 
test_extend()\n","sub_path":"cazipcode/pkg/superjson/_superjson.py","file_name":"_superjson.py","file_ext":"py","file_size_in_byte":20033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"140912209","text":"lens = {1: 4, 2: 4, 3: 2, 4: 2, 99: 1, 5: 3, 6: 3, 7: 4, 8: 4, 9: 2}\n\n\nclass IntcodeController:\n\n def __init__(self, intcode, buffer=[], pointer=0, extended={}, get_input=None):\n self.intcode = intcode\n self.virgincode = [] + intcode\n self.buffer = buffer\n self.virginbuffer = [] + buffer\n self.ib = 0\n self.pointer = pointer\n self.output = []\n self.alive = True\n self.opcode = []\n self.lastopcode = 0\n self.relbase = 0\n self.extended = extended\n self.tracking = False\n self.get_input = get_input\n\n def dump(self):\n return self.intcode.copy(), self.buffer.copy(), self.ib, self.pointer, self.output.copy(), \\\n self.alive, self.opcode.copy(), self.lastopcode, self.relbase, self.extended.copy()\n\n def restore_from_dump(self, dump):\n self.intcode = dump[0]\n self.buffer = dump[1]\n self.ib = dump[2]\n self.pointer = dump[3]\n self.output = dump[4]\n self.alive = dump[5]\n self.opcode = dump[6]\n self.lastopcode = dump[7]\n self.relbase = dump[8]\n self.extended = dump[9]\n\n def reset(self):\n self.__init__(self.virgincode, self.virginbuffer, pointer=0)\n\n def print(self):\n print('pointer=', self.pointer, 'opcode=', self.intcode[self.pointer],\n 'relbase=', self.relbase, self.extended)\n\n def parsecurrentopcode(self):\n n = self.intcode[self.pointer]\n self.opcode = [n % 100, (n // 100) % 10, (n // 1000) % 10,\n (n // 10000) % 10]\n\n def read(self, n):\n if n < len(self.intcode):\n return self.intcode[n]\n else:\n return self.extended[n] if n in self.extended else 0\n\n def write(self, v):\n offset = lens[self.opcode[0]] - 1\n n = self.parameter(offset, mode=1) + (self.relbase if self.opcode[offset] == 2 else 0)\n if n < len(self.intcode):\n self.intcode[n] = v\n else:\n self.extended[n] = v\n\n def parameter(self, offset, mode=-1):\n mode = self.opcode[offset] if mode == -1 else mode\n if mode == 0:\n return self.read(self.intcode[self.pointer+offset])\n if mode == 1:\n return self.intcode[self.pointer+offset]\n if 
mode == 2:\n return self.read(self.intcode[self.pointer+offset]+self.relbase)\n\n def read_input(self):\n if not self.get_input:\n self.ib += 1\n return self.buffer[self.ib-1]\n else:\n return self.get_input()\n\n def onetick(self):\n self.parsecurrentopcode()\n next_pointer = self.pointer + lens[self.opcode[0]]\n self.lastopcode = self.opcode[0]\n if self.tracking:\n self.print()\n if self.opcode[0] == 1:\n self.write(self.parameter(1) + self.parameter(2))\n if self.opcode[0] == 2:\n self.write(self.parameter(1) * self.parameter(2))\n if self.opcode[0] == 3:\n self.write(self.read_input())\n if self.opcode[0] == 4:\n self.output.append(self.parameter(1))\n if self.opcode[0] == 5:\n if self.parameter(1) != 0:\n next_pointer = self.parameter(2)\n if self.opcode[0] == 6:\n if self.parameter(1) == 0:\n next_pointer = self.parameter(2)\n if self.opcode[0] == 7:\n self.write(1 if self.parameter(1) < self.parameter(2) else 0)\n if self.opcode[0] == 8:\n self.write(1 if self.parameter(1) == self.parameter(2) else 0)\n if self.opcode[0] == 9:\n self.relbase += self.parameter(1)\n if self.opcode[0] == 99:\n self.alive = False\n return next_pointer\n\n def run(self):\n while self.alive:\n self.pointer = self.onetick()\n\n def run2output(self):\n self.lastopcode = 0\n while (self.lastopcode != 4) and self.alive:\n self.pointer = self.onetick()\n return self.alive\n\n def next_triple(self):\n self.run2output()\n self.run2output()\n self.run2output()\n if self.alive:\n return self.output[-3:]\n else:\n return None\n","sub_path":"intcomp.py","file_name":"intcomp.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"510198498","text":"from tibanna_4dn.lambdas.check_task import handler\nfrom tests.tibanna.pony.conftest import valid_env\nimport pytest\nimport boto3\n\n\n@pytest.fixture()\ndef check_task_input():\n return {\"config\": {\"log_bucket\": \"tibanna-output\"},\n \"jobid\": \"test_job\",\n \"push_error_to_end\": True\n }\n\n\n@pytest.fixture()\ndef s3(check_task_input):\n bucket_name = check_task_input['config']['log_bucket']\n return boto3.resource('s3').Bucket(bucket_name)\n\n\n@valid_env\n@pytest.mark.webtest\ndef test_check_task_awsem_fails_if_job_error_found(check_task_input, s3):\n jobid = 'hahaha'\n check_task_input_modified = check_task_input\n check_task_input_modified['jobid'] = jobid\n job_started = \"%s.job_started\" % jobid\n s3.put_object(Body=b'', Key=job_started)\n job_error = \"%s.error\" % jobid\n s3.put_object(Body=b'', Key=job_error)\n res = handler(check_task_input_modified, '')\n assert ('error' in res)\n s3.delete_objects(Delete={'Objects': [{'Key': job_started}]})\n s3.delete_objects(Delete={'Objects': [{'Key': job_error}]})\n","sub_path":"tests/tibanna/pony/check_task/test_handler.py","file_name":"test_handler.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"474473446","text":"# IEEE 488.2 Block Data Format\n\n# transfer size requirement:\n# len(str(data)) <= 9\nMAX_SUPPORTED_SIZE = 10**9 - 1\n\ndef to_block_data_format(data):\n assert type(data) == bytes, 'data must be bytes!'\n size = len(data)\n assert size < MAX_SUPPORTED_SIZE, f'Maximum supported data size is {MAX_SUPPORTED_SIZE}. len(data) => {size}.'\n size_len = len(str(size))\n header = f'#{size_len}{size}'.encode();\n return header + data\n","sub_path":"src/server/test_automation/commands/block_data.py","file_name":"block_data.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"74408502","text":"\"\"\"\nTest if a binary tree is height-balanced\n\nA binary tree is said to be height-balanced if for each node in the tree, \nthe difference in the height of its left and right subtrees is at most one.\n\nWrite a program that takes as input the root of a binary tree and checks whether the tree is height-balanced\n\nVariant 1: Write a program that returns the size of the largest subtree that is complete.\n\nVariant 2: Define a node in a binary tree to be k-balanced if the difference in the number of nodes in left and right subtrees \nis no more than k. Design an algorithm that takes as input a binary tree and positive integer k and returns a node in the binary\ntree such that hte node is not k-balanced but all of its descendants are k-balanced. \n\nFor the sake of simplicity - will link to leetcode example for easier testing (https://leetcode.com/problems/balanced-binary-tree/)\n\"\"\"\n\nclass TreeNode:\n def __init__(self, val = 0, left = None, right = None):\n self.val = val\n self.left = left\n self.right = right\n\nclass MainSolution_9_1:\n\n \"\"\"\n Bottom Up Approaches\n \"\"\"\n def dfs_helper(self, root):\n \"\"\"\n Bottom-up Recursion: \n Time Complexity: O(N) - each node gets visited once through recursion\n Space Complexity: O(N) - if tree is not balanced/is skewed - then recursion stack will reach number of nodes, N\n \"\"\"\n if (root is None or self.is_tree_balanced == False):\n return (-1)\n\n left_height = self.dfs_helper(root.left)\n right_height = self.dfs_helper(root.right)\n\n curr_height = max(left_height, right_height) + 1\n if (abs(left_height - right_height) > 1):\n self.is_tree_balanced = False\n return(curr_height)\n\n def is_balanced(self, root: TreeNode) -> bool:\n self.is_tree_balanced = True\n self.dfs_helper(root)\n return(self.is_tree_balanced)\n\n def dfs_helper(self, root):\n \"\"\"\n Alternative Way\n \"\"\"\n if (root is None): return (True, -1)\n\n is_left_tree_balanced, left_height = 
dfs_helper(root.left) # LEFT\n is_right_tree_balanced, right_height = dfs_helper(root.right) # RIGHT\n\n if (is_right_tree_balanced == False or is_left_tree_balanced == False): # VISITED\n return(False, max(left_height, right_height))\n \n return(True, max(left_height, right_height) + 1)\n\n def is_balanced_alt_style(self, root: TreeNode) -> bool:\n \"\"\"\n Can send an array of values - specifically [is_balanced, BinaryTreeNode]\n \"\"\"\n self.dfs_helper_alternative(root)\n \n \"\"\"\n Top Down Approach\n \"\"\"\n\n def calculate_height(self, root: TreeNode):\n if (root is None):\n return (-1)\n return (max(self.height(root.left), self.height(root.right)) + 1)\n \n def is_balanced_top_down(self, root: TreeNode):\n \"\"\"\n Time Complexity: O(N LOG N)\n For the root node, it will calculate the height for the root by going down the left and then the right side.\n Each side will be LOG(N) - height of the tree. So times 2. It will be 2*LOG(N).\n However we call height for each node in the tree and there are N nodes. So it is N * 2 * LOG(N).\n\n Bounded by O(N) because if the tree is skewed, it will stop recursion as soon as height of a nodes children are not within 1.\n\n Space Complexity: O(N) - worst case the tree is skewed and it will hold all nodes in the tree in its stack\n \"\"\"\n if (root is None):\n return True\n \n comparison = abs(self.height(root.left) - self.height(root.right)) <= 1\n ans = comparison and self.is_balanced_top_down(root.left) and self.is_balanced_top_down(root.right)\n return(ans)\n\n\n","sub_path":"Chapter 9 - Binary Trees/9_1 Test if a binary tree is height balanced.py","file_name":"9_1 Test if a binary tree is height balanced.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"281485067","text":"import os\n\nimport click\nimport hcl\nfrom popper import utils as pu\nfrom popper.cli import pass_context\nfrom popper.gha import Workflow\n\n\n@click.option(\n '--wfile',\n help=(\n 'File containing the definition of the workflow. '\n '[default: ./github/main.workflow OR ./main.workflow]'\n ),\n required=False,\n default=None\n)\n@click.option(\n '--recursive',\n help='Generate .dot file for any.workflow file '\n 'found recursively from current path.',\n required=False,\n is_flag=True\n)\n@click.command('dot', short_help='Generate a graph in the .dot format')\n@pass_context\ndef cli(ctx, wfile, recursive):\n \"\"\"\n Creates a graph in the .dot format representing the workflow\n \"\"\"\n wfile_list = list()\n if recursive:\n wfile_list = pu.find_recursive_wfile()\n else:\n wfile_list.append(pu.find_default_wfile(wfile))\n\n for wfile in wfile_list:\n pipeline = Workflow(wfile, False, False, False, False, False, False)\n\n graph = list()\n\n wf = pipeline.wf\n workflow_name = list(wf['workflow'].keys())[0]\n\n action = wf['resolves'][0]\n last_action = get_first_action(wf)\n\n for act in last_action:\n graph.append(\"\\t{} -> {};\\n\".format(\n workflow_name.replace(' ', '_').replace('-', '_'),\n act.replace(' ', '_').replace('-', '_')))\n\n parent_action = cur_action = action\n graph = add(parent_action, cur_action, wf['action'], graph)\n graph = ''.join(list(set(graph)))\n graph = \"digraph G {\\n\" + graph + \"}\\n\"\n pu.info(graph)\n\n\n# Recursively go through \"needs\" and add corresponding actions to graph\ndef add(parent_action, cur_action, actions, graph):\n\n if 'needs' in actions[cur_action]:\n action_list = actions[cur_action]['needs']\n for act in action_list:\n graph = add(cur_action, act, actions, graph)\n\n # Adds edges to the graph\n if cur_action != parent_action:\n graph.append(\"\\t{} -> {};\\n\".format(\n cur_action.replace(' ', '_').replace('-', '_'),\n parent_action.replace(' ', '_').replace('-', '_')))\n\n 
return graph\n\n\ndef get_first_action(wf):\n actions = list()\n for act in wf['action']:\n if act in wf['action']:\n if 'needs' not in wf['action'][act]:\n actions.append(act)\n return actions\n","sub_path":"cli/popper/commands/cmd_dot.py","file_name":"cmd_dot.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"130138227","text":"#!/home/cdays/pyweb3/bin/python3\nimport json\nimport web3\nimport time\nimport re\n\nfrom web3 import Web3, HTTPProvider, TestRPCProvider\nfrom solc import compile_source\nfrom web3.contract import ConciseContract\n\ndef wait_for_receipt(w3, tx_hash, poll_interval):\n while True:\n tx_receipt = w3.eth.getTransactionReceipt(tx_hash)\n if tx_receipt:\n return tx_receipt\n time.sleep(poll_interval)\n\nprint(\"Content-Type: application/json\")\nprint()\n\n# web3.py instance\nw3 = Web3(HTTPProvider('http://localhost:8501'))\nw3.middleware_stack.inject(web3.middleware.geth_poa_middleware, layer=0)\n\nabi = [{'constant': False, 'stateMutability': 'nonpayable', 'name': 'setGreeting', 'payable': False, 'type': 'function', 'inputs': [{'type': 'string', 'name': '_greeting'}], 'outputs': []}, {'constant': True, 'stateMutability': 'view', 'name': 'greet', 'payable': False, 'type': 'function', 'inputs': [], 'outputs': [{'type': 'string', 'name': ''}]}, {'constant': True, 'stateMutability': 'view', 'name': 'greeting', 'payable': False, 'type': 'function', 'inputs': [], 'outputs': [{'type': 'string', 'name': ''}]}, {'stateMutability': 'nonpayable', 'type': 'constructor', 'inputs': [], 'payable': False}]\n\ncontract_address= '0x2D2403dc13D5c52dE1dD055624762dadc5c10397'\ncontract_instance = w3.eth.contract(address=contract_address, abi=abi,ContractFactoryClass=ConciseContract)\n#tx_hash= contract_instance.setGreeting('Lulu', transact={'from': w3.eth.accounts[0]})\n#wait_for_receipt(w3, tx_hash, 1)\ngetre = re.compile('^greet')\noutput = {}\nfor func in abi:\n try:\n if getre.match(func[\"name\"]):\n output[func[\"name\"]] = eval(\"contract_instance.\"+func[\"name\"]+\"()\")\n except:\n pass\nprint(json.dumps(output))\n\n","sub_path":"cgi-bin/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"113374478","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nsortphotos.py\n\nCreated on 3/2/2013\nCopyright (c) S. Andrew Ning. All rights reserved.\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import with_statement\nimport subprocess\nimport os\nimport sys\nimport shutil\ntry:\n import json\nexcept:\n import simplejson as json\nimport filecmp\nfrom datetime import datetime, timedelta\nimport re\nimport locale\nimport exifread\nimport reverse_geocode\n\n# Setting locale to the 'local' value\nlocale.setlocale(locale.LC_ALL, '')\n\nexiftool_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Image-ExifTool', 'exiftool')\n\n\n# -------- convenience methods -------------\n\ndef parse_date_exif(date_string):\n \"\"\"\n extract date info from EXIF data\n YYYY:MM:DD HH:MM:SS\n or YYYY:MM:DD HH:MM:SS+HH:MM\n or YYYY:MM:DD HH:MM:SS-HH:MM\n or YYYY:MM:DD HH:MM:SSZ\n \"\"\"\n\n # split into date and time\n elements = str(date_string).strip().split() # ['YYYY:MM:DD', 'HH:MM:SS']\n\n if len(elements) < 1:\n return None\n\n # parse year, month, day\n date_entries = elements[0].split(':') # ['YYYY', 'MM', 'DD']\n\n # check if three entries, nonzero data, and no decimal (which occurs for timestamps with only time but no date)\n if len(date_entries) == 3 and date_entries[0] > '0000' and '.' 
not in ''.join(date_entries):\n year = int(date_entries[0])\n month = int(date_entries[1])\n day = int(date_entries[2])\n else:\n return None\n\n # parse hour, min, second\n time_zone_adjust = False\n hour = 12 # defaulting to noon if no time data provided\n minute = 0\n second = 0\n\n if len(elements) > 1:\n time_entries = re.split('(\\+|-|Z)', elements[1]) # ['HH:MM:SS', '+', 'HH:MM']\n time = time_entries[0].split(':') # ['HH', 'MM', 'SS']\n\n if len(time) == 3:\n hour = int(time[0])\n minute = int(time[1])\n second = int(time[2].split('.')[0])\n elif len(time) == 2:\n hour = int(time[0])\n minute = int(time[1])\n\n # adjust for time-zone if needed\n if len(time_entries) > 2:\n time_zone = time_entries[2].split(':') # ['HH', 'MM']\n\n if len(time_zone) == 2:\n time_zone_hour = int(time_zone[0])\n time_zone_min = int(time_zone[1])\n\n # check if + or -\n if time_entries[1] == '+':\n time_zone_hour *= -1\n\n dateadd = timedelta(hours=time_zone_hour, minutes=time_zone_min)\n time_zone_adjust = True\n\n\n # form date object\n try:\n date = datetime(year, month, day, hour, minute, second)\n except ValueError:\n return None # errors in time format\n\n # try converting it (some \"valid\" dates are way before 1900 and cannot be parsed by strtime later)\n try:\n date.strftime('%Y/%m-%b') # any format with year, month, day, would work here.\n except ValueError:\n return None # errors in time format\n\n # adjust for time zone if necessary\n if time_zone_adjust:\n date += dateadd\n\n return date\n\n\n\ndef get_oldest_timestamp(data, additional_groups_to_ignore, additional_tags_to_ignore, print_all_tags=False):\n \"\"\"data as dictionary from json. 
Should contain only time stamps except SourceFile\"\"\"\n\n # save only the oldest date\n date_available = False\n oldest_date = datetime.now()\n oldest_keys = []\n\n # save src file\n src_file = data['SourceFile']\n\n # ssetup tags to ignore\n ignore_groups = ['ICC_Profile'] + additional_groups_to_ignore\n ignore_tags = ['SourceFile', 'XMP:HistoryWhen'] + additional_tags_to_ignore\n\n\n if print_all_tags:\n print('All relevant tags:')\n\n # run through all keys\n for key in data.keys():\n\n # check if this key needs to be ignored, or is in the set of tags that must be used\n if (key not in ignore_tags) and (key.split(':')[0] not in ignore_groups) and 'GPS' not in key:\n\n date = data[key]\n\n if print_all_tags:\n print(str(key) + ', ' + str(date))\n\n # (rare) check if multiple dates returned in a list, take the first one which is the oldest\n if isinstance(date, list):\n date = date[0]\n\n try:\n exifdate = parse_date_exif(date) # check for poor-formed exif data, but allow continuation\n except Exception as e:\n exifdate = None\n\n if exifdate and exifdate < oldest_date:\n date_available = True\n oldest_date = exifdate\n oldest_keys = [key]\n\n elif exifdate and exifdate == oldest_date:\n oldest_keys.append(key)\n\n if not date_available:\n oldest_date = None\n\n if print_all_tags:\n print()\n\n return src_file, oldest_date, oldest_keys\n\n\n\ndef check_for_early_morning_photos(date, day_begins):\n \"\"\"check for early hour photos to be grouped with previous day\"\"\"\n\n if date.hour < day_begins:\n print('moving this photo to the previous day for classification purposes (day_begins=' + str(day_begins) + ')')\n date = date - timedelta(hours=date.hour+1) # push it to the day before for classificiation purposes\n\n return date\n\n# read tags using exifread\n### helper functions, for geocoordinates\n\ndef _convert_to_degress(value):\n \"\"\"\n Helper function to convert the GPS coordinates stored in the EXIF to degress in float format\n :param value:\n :type 
value: exifread.utils.Ratio\n :rtype: float\n \"\"\"\n d = float(value.values[0].num) / float(value.values[0].den)\n m = float(value.values[1].num) / float(value.values[1].den)\n s = float(value.values[2].num) / float(value.values[2].den)\n\n return d + (m / 60.0) + (s / 3600.0)\n \ndef get_exif_location(exif_data):\n \"\"\"\n Returns the latitude and longitude, if available, from the provided exif_data (obtained through get_exif_data above)\n \"\"\"\n lat = None\n lon = None\n\n gps_latitude = _get_if_exist(exif_data, 'GPS GPSLatitude')\n gps_latitude_ref = _get_if_exist(exif_data, 'GPS GPSLatitudeRef')\n gps_longitude = _get_if_exist(exif_data, 'GPS GPSLongitude')\n gps_longitude_ref = _get_if_exist(exif_data, 'GPS GPSLongitudeRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = _convert_to_degress(gps_latitude)\n if gps_latitude_ref.values[0] != 'N':\n lat = 0 - lat\n\n lon = _convert_to_degress(gps_longitude)\n if gps_longitude_ref.values[0] != 'E':\n lon = 0 - lon\n\n return lat, lon\n\ndef _get_if_exist(data, key):\n if key in data:\n return data[key]\n\n return None\n\n###\n\n# Open image file for reading (binary mode)\n\ndef get_exif(file_name):\n\n f = open(file_name, 'rb')\n\n tagjson = {'SourceFile': file_name}\n\n tags = exifread.process_file(f, details=True)\n f.close()\n\n lat, lon = get_exif_location(tags)\n for dt_tag in tags:\n dt_value = '%s' % tags[dt_tag]\n tagjson[dt_tag.replace(\" \",\":\")] = dt_value # tag group separator\n\n tagjson['EXIF:GpsLat'] = lat\n tagjson['EXIF:GpsLon'] = lon\n\n if lat and lon:\n location = reverse_geocode.get([lat,lon])\n tagjson['Location:country_code'] = location[\"country_code\"]\n tagjson['Location:city'] = location[\"city\"]\n tagjson['Location:country'] = location[\"country\"]\n\n return tagjson\n\ndef get_exif_folder(folder_name):\n\n # scan through all folders and subfolders\n tagjson = []\n\n for root, subdirs, files in os.walk(folder_name):\n for filename in 
files:\n file_path = os.path.join(root, filename)\n tagjson += [get_exif(file_path)]\n\n return tagjson\n\n# ---------------------------------------\n\n\ndef sortPhotos(src_dir, dest_dir, sort_format, rename_format, recursive=False,\n copy_files=False, test=False, remove_duplicates=True, day_begins=0,\n additional_groups_to_ignore=['File'], additional_tags_to_ignore=[],\n use_only_groups=None, use_only_tags=None, verbose=True, keep_filename=False):\n \"\"\"\n This function is a convenience wrapper around ExifTool based on common usage scenarios for sortphotos.py\n\n Parameters\n ---------------\n src_dir : str\n directory containing files you want to process\n dest_dir : str\n directory where you want to move/copy the files to\n sort_format : str\n format code for how you want your photos sorted\n (https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior)\n rename_format : str\n format code for how you want your files renamed\n (https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior)\n None to not rename file\n recursive : bool\n True if you want src_dir to be searched recursively for files (False to search only in top-level of src_dir)\n copy_files : bool\n True if you want files to be copied over from src_dir to dest_dir rather than moved\n test : bool\n True if you just want to simulate how the files will be moved without actually doing any moving/copying\n remove_duplicates : bool\n True to remove files that are exactly the same in name and a file hash\n keep_filename : bool\n True to append original filename in case of duplicates instead of increasing number\n day_begins : int\n what hour of the day you want the day to begin (only for classification purposes). Defaults at 0 as midnight.\n Can be used to group early morning photos with the previous day. must be a number between 0-23\n additional_groups_to_ignore : list(str)\n tag groups that will be ignored when searching for file data. 
By default File is ignored\n additional_tags_to_ignore : list(str)\n specific tags that will be ignored when searching for file data.\n use_only_groups : list(str)\n a list of groups that will be exclusived searched across for date info\n use_only_tags : list(str)\n a list of tags that will be exclusived searched across for date info\n verbose : bool\n True if you want to see details of file processing\n\n \"\"\"\n\n # some error checking\n if not os.path.exists(src_dir):\n raise Exception('Source directory does not exist')\n\n args = [src_dir]\n\n metadata = get_exif_folder(args[0])\n\n # setup output to screen\n num_files = len(metadata)\n print()\n\n if test:\n test_file_dict = {}\n\n # parse output extracting oldest relevant date\n for idx, data in enumerate(metadata):\n\n # extract timestamp date for photo\n src_file, date, keys = get_oldest_timestamp(data, additional_groups_to_ignore, additional_tags_to_ignore)\n\n # fixes further errors when using unicode characters like \"\\u20AC\"\n src_file.encode('utf-8')\n\n if verbose:\n # write out which photo we are at\n ending = ']'\n if test:\n ending = '] (TEST - no files are being moved/copied)'\n print('[' + str(idx+1) + '/' + str(num_files) + ending)\n print('Source: ' + src_file)\n else:\n # progress bar\n numdots = int(20.0*(idx+1)/num_files)\n sys.stdout.write('\\r')\n sys.stdout.write('[%-20s] %d of %d ' % ('='*numdots, idx+1, num_files))\n sys.stdout.flush()\n\n # check if no valid date found\n if not date:\n if verbose:\n print('No valid dates were found using the specified tags. File will remain where it is.')\n print()\n # sys.stdout.flush()\n continue\n\n # ignore hidden files\n if os.path.basename(src_file).startswith('.'):\n print('hidden file. 
will be skipped')\n print()\n continue\n\n if verbose:\n print('Date/Time: ' + str(date))\n print('Corresponding Tags: ' + ', '.join(keys))\n\n # early morning photos can be grouped with previous day (depending on user setting)\n date = check_for_early_morning_photos(date, day_begins)\n\n\n # create folder structure\n\n country = data.get(\"Location:country\",\"\")\n city = data.get(\"Location:city\", '')\n country_code = data.get(\"Location:country_code\", '')\n\n sort_format_geo = sort_format.replace('%country',country)\n sort_format_geo = sort_format_geo.replace('%city',city)\n sort_format_geo = sort_format_geo.replace('%country_code',country_code)\n dir_structure = date.strftime(sort_format_geo)\n dirs = dir_structure.split('/')\n dest_file = dest_dir\n for thedir in dirs:\n dest_file = os.path.join(dest_file, thedir)\n if not test and not os.path.exists(dest_file):\n os.makedirs(dest_file)\n\n # rename file if necessary\n filename = os.path.basename(src_file)\n\n if rename_format is not None and date is not None:\n _, ext = os.path.splitext(filename)\n filename = date.strftime(rename_format) + ext.lower()\n\n # setup destination file\n dest_file = os.path.join(dest_file, filename)\n root, ext = os.path.splitext(dest_file)\n\n if verbose:\n name = 'Destination '\n if copy_files:\n name += '(copy): '\n else:\n name += '(move): '\n print(name + dest_file)\n\n\n # check for collisions\n append = 1\n fileIsIdentical = False\n\n while True:\n\n if (not test and os.path.isfile(dest_file)) or (test and dest_file in test_file_dict.keys()): # check for existing name\n if test:\n dest_compare = test_file_dict[dest_file]\n else:\n dest_compare = dest_file\n if remove_duplicates and filecmp.cmp(src_file, dest_compare): # check for identical files\n fileIsIdentical = True\n if verbose:\n print('Identical file already exists. 
Duplicate will be ignored.\\n')\n break\n\n else: # name is same, but file is different\n if keep_filename:\n orig_filename = os.path.splitext(os.path.basename(src_file))[0]\n dest_file = root + '_' + orig_filename + '_' + str(append) + ext\n else:\n dest_file = root + '_' + str(append) + ext\n append += 1\n if verbose:\n print('Same name already exists...renaming to: ' + dest_file)\n\n else:\n break\n\n\n # finally move or copy the file\n if test:\n test_file_dict[dest_file] = src_file\n\n else:\n\n if fileIsIdentical:\n continue # ignore identical files\n else:\n if copy_files:\n shutil.copy2(src_file, dest_file)\n else:\n shutil.move(src_file, dest_file)\n\n\n\n if verbose:\n print()\n # sys.stdout.flush()\n\n\n if not verbose:\n print()\n\n\ndef main():\n import argparse\n\n # setup command line parsing\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n description='Sort files (primarily photos and videos) into folders by date\\nusing EXIF and other metadata')\n parser.add_argument('src_dir', type=str, help='source directory')\n parser.add_argument('dest_dir', type=str, help='destination directory')\n parser.add_argument('-r', '--recursive', action='store_true', help='search src_dir recursively')\n parser.add_argument('-c', '--copy', action='store_true', help='copy files instead of move')\n parser.add_argument('-s', '--silent', action='store_true', help='don\\'t display parsing details.')\n parser.add_argument('-t', '--test', action='store_true', help='run a test. files will not be moved/copied\\ninstead you will just a list of would happen')\n parser.add_argument('--sort', type=str, default='%Y/%m-%b-%country-%city',\n help=\"choose destination folder structure using datetime format \\n\\\n https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior. \\n\\\n Use forward slashes / to indicate subdirectory(ies) (independent of your OS convention). 
\\n\\\n The default is '%%Y/%%m-%%b', which separates by year then month \\n\\\n with both the month number and name (e.g., 2012/02-Feb). \\n\\\n Use %%city, %%country and %%country_code for location.\")\n parser.add_argument('--rename', type=str, default=None,\n help=\"rename file using format codes \\n\\\n https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior. \\n\\\n default is None which just uses original filename\")\n parser.add_argument('--keep-filename', action='store_true',\n help='In case of duplicated output filenames an increasing number and the original file name will be appended',\n default=False)\n parser.add_argument('--keep-duplicates', action='store_true',\n help='If file is a duplicate keep it anyway (after renaming).')\n parser.add_argument('--day-begins', type=int, default=0, help='hour of day that new day begins (0-23), \\n\\\n defaults to 0 which corresponds to midnight. Useful for grouping pictures with previous day.')\n parser.add_argument('--ignore-groups', type=str, nargs='+',\n default=[],\n help='a list of tag groups that will be ignored for date informations.\\n\\\n list of groups and tags here: http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/\\n\\\n by default the group \\'File\\' is ignored which contains file timestamp data')\n parser.add_argument('--ignore-tags', type=str, nargs='+',\n default=[],\n help='a list of tags that will be ignored for date informations.\\n\\\n list of groups and tags here: http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/\\n\\\n the full tag name needs to be included (e.g., EXIF:CreateDate)')\n parser.add_argument('--use-only-groups', type=str, nargs='+',\n default=None,\n help='specify a restricted set of groups to search for date information\\n\\\n e.g., EXIF')\n parser.add_argument('--use-only-tags', type=str, nargs='+',\n default=None,\n help='specify a restricted set of tags to search for date information\\n\\\n e.g., EXIF:CreateDate')\n\n # parse command line 
arguments\n args = parser.parse_args()\n\n sortPhotos(args.src_dir, args.dest_dir, args.sort, args.rename, args.recursive,\n args.copy, args.test, not args.keep_duplicates, args.day_begins,\n args.ignore_groups, args.ignore_tags, args.use_only_groups,\n args.use_only_tags, not args.silent, args.keep_filename)\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/sortphotos.py","file_name":"sortphotos.py","file_ext":"py","file_size_in_byte":19033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"536478971","text":"import matplotlib.ticker as ticker\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom scipy.signal import find_peaks\nimport time\n\n# data = np.loadtxt('Data/2D/2D plot-0.68.dat')\n# data1 = np.loadtxt('Data/2D/2D plot-{0.69}.dat')\n# data2 = np.loadtxt('Data/2D/2D plot-{0.70}.dat')\n# data3 = np.loadtxt('Data/2D/2D plot-{0.71}.dat')\n# x = data[0]\n# y = data[1]/20\n# x1 = data1[0]\n# y1 = data1[1]\n# x2 = data2[0]\n# y2 = data2[1]\n# x3 = data3[0]\n# y3 = data3[1]\n# # x4 = data4[0]\n# # y4 = data4[1]\n# for i in range(len(y1)):\n# y1[i] = y1[i] + 0.5\n# for i in range(len(y2)):\n# y2[i] = y2[i] + 1\n# for i in range(len(y3)):\n# y3[i] = y3[i] + 1.5\n# # for i in range(len(y4)):\n# # y4[i] = y4[i] + 1.5\n# plt.plot(x,y)\n# plt.plot(x1,y1)\n# plt.plot(x2,y2)\n# plt.plot(x3,y3)\n# # plt.plot(x4,y4)\n# plt.xlabel('Radius (r)')\n# plt.ylabel('Pair Distribution Function g(r)')\n# plt.legend([\"Area Fraction = 0.68\", \"Area Fraction = 0.69\", \"Area Fraction = 0.70\", \"Area Fraction = 0.71\"], loc='upper right')\n# # plt.savefig(\"3D Mixed Plot(3).jpeg\", dpi = 250)\n# plt.show()\ndef d3():\n N3d = [296,320,344,360,372,384,400]\n y3d = []\n x3d = []\n for i in N3d:\n x3d.append((i*math.pi)/(6*(8**3)))\n af = 0.66\n for i in range(len(N3d)):\n data = np.loadtxt(f\"Data/3D/3D plot-{N3d[i]}.dat\")\n N2d = 100\n diameter = 1\n afL = math.sqrt(N2d*math.pi*(diameter**2)/(4*af))\n L = round(afL,3)\n print(L)\n avg_no_density = N2d/(L**2)\n print(avg_no_density)\n density1d = 0.75\n density3d = N3d[i]/(8**3)\n xvals = data[0]\n yvals = data[1]\n\n troughs,_ = find_peaks(-yvals,height=[-20,-0.001])\n\n # plt.plot(xvals,yvals)\n # plt.plot(xvals[troughs],yvals[troughs],'o')\n # plt.title(\"Area Fraction - 0.71\")\n # plt.savefig('Data/2D/2D_Mixed_Plot.png', dpi =250)\n # plt.show()\n for i in range(len(xvals)):\n yvals[i] = (yvals[i])* (xvals[i]**2)\n # print(xvals[troughs[0]])\n # print(yvals[troughs[0]])\n # ytrough = 
troughs[0]\n\n cn = np.trapz(yvals[0:32],x=xvals[0:32])\n final = cn*4*math.pi*density3d\n y3d.append(final)\n\n print(y3d)\n plt.plot(x3d,y3d,marker= 'x')\n plt.xlabel('Packing Fraction')\n plt.ylabel('Coordination Number')\n # plt.errorbar(x3d,y3d,yerr=0.1)\n plt.show()\n\ndef d2():\n n2d = [0.66,0.67,0.68,0.69,0.7,0.71,0.72]\n y2d = []\n for i in n2d:\n data = np.loadtxt(f\"Data/2D/2D plot-{i}.dat\")\n N2d = 100\n diameter = 1\n afL = math.sqrt(N2d*math.pi*(diameter**2)/(4*i))\n L = round(afL,3)\n avg_no_density = N2d/(L**2)\n xvals = data[0]\n yvals = data[1]\n\n troughs,_ = find_peaks(-yvals,height=[-20,-0.001])\n\n plt.plot(xvals,yvals)\n plt.plot(xvals[troughs],yvals[troughs],'o')\n # plt.title(\"Area Fraction - 0.71\")\n # plt.savefig('Data/2D/2D_Mixed_Plot.png', dpi =250)\n plt.show()\n for i in range(len(xvals)):\n yvals[i] = (yvals[i])* (xvals[i]**2)\n # print(xvals[troughs[0]])\n # print(yvals[troughs[0]])\n # ytrough = troughs[0]\n # print(troughs)\n cn = np.trapz(yvals[0:27],x=xvals[0:27])\n final = cn*4*math.pi*avg_no_density\n y2d.append(final)\n\n print(y2d)\n plt.plot(n2d,y2d,marker= 'x')\n plt.xlabel('Packing Fraction')\n plt.ylabel('Coordination Number')\n plt.show()\n\ndef d1():\n n2d = [0.75,0.8,0.85]\n y2d = []\n for i in n2d:\n data = np.loadtxt(f\"Data/1D/1d plot-{i}.dat\")\n avg_no_density = i\n xvals = data[0]\n yvals = data[1]\n\n troughs,_ = find_peaks(-yvals,height=[-20,-0.001])\n\n # plt.plot(xvals,yvals)\n # plt.plot(xvals[troughs],yvals[troughs],'o')\n # # plt.title(\"Area Fraction - 0.71\")\n # # plt.savefig('Data/2D/2D_Mixed_Plot.png', dpi =250)\n # plt.show()\n for i in range(len(xvals)):\n yvals[i] = (yvals[i])* (xvals[i]**2)\n # print(xvals[troughs[0]])\n # print(yvals[troughs[0]])\n # ytrough = troughs[0]\n print(troughs)\n cn = np.trapz(yvals[0:7],x=xvals[0:7])\n final = cn*4*math.pi*avg_no_density\n y2d.append(final)\n\n print(y2d)\n plt.plot(n2d,y2d)\n plt.xlabel('Packing Fraction')\n plt.ylabel('Coordination Number')\n 
plt.show()\n\nd1()","sub_path":"Scripts/Plot overlap.py","file_name":"Plot overlap.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"632197780","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport sys,os,tempfile,shutil,subprocess,glob\nimport argparse\n\nif __name__ == \"__main__\":\n\n # define options\n parser = argparse.ArgumentParser(description=\"Harvest track validation plots\")\n parser.add_argument(\"files\", metavar=\"file\", type=str, nargs=\"+\",\n help=\"files to be harvested (convert edm DQM format to plain ROOT format\")\n parser.add_argument(\"-o\", \"--outputFile\", type=str, default=\"harvest.root\",\n help=\"output file (default: 'harvest.root')\")\n\n opts = parser.parse_args()\n\n # absolute path outputFile\n outputFile = os.path.abspath(opts.outputFile)\n\n # check the input files\n for f in opts.files:\n if not os.path.exists(f):\n parser.error(\"DQM file %s does not exist\" % f)\n\n # compile a file list for cmsDriver\n filelist = \",\".join([\"file:{0}\".format(os.path.abspath(_file)) for _file in opts.files])\n\n # go to a temporary directory\n _cwd = os.getcwd()\n _tempdir = tempfile.mkdtemp()\n os.chdir(_tempdir)\n\n # compile cmsDriver command\n cmsDriverCommand = \"cmsDriver.py harvest --scenario pp --filetype DQM --conditions auto:run2_mc --mc -s HARVESTING:@trackingOnlyValidation+@trackingOnlyDQM+postProcessorHLTtrackingSequence -n -1 --filein {0}\".format(filelist)\n print(\"# running cmsDriver\" + \"\\n\" + cmsDriverCommand)\n \n # run it\n subprocess.call(cmsDriverCommand.split(\" \"))\n\n # find the output and move it to the specified output file path\n ofiles = glob.glob(\"DQM*.root\")\n if len(ofiles) != 1:\n print(\"ERROR: expecting exactly one output file matching DQM*.root\")\n print(\" ls of current directory({0}):\".format(_tempdir))\n os.system(\"ls -lt\")\n sys.exit()\n shutil.move(ofiles[0],outputFile)\n \n # move back to the original directory\n os.chdir(_cwd)\n\n # and get rid of the temporary directory\n shutil.rmtree(_tempdir)\n 
\n","sub_path":"Validation/RecoTrack/scripts/harvestTrackValidationPlots.py","file_name":"harvestTrackValidationPlots.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"136107514","text":"import datetime\r\nimport urllib.request\r\nimport json\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nimport time\r\n\r\n#Create timestamp, open logfile\r\nprint('Initializing...')\r\ntimestamp = datetime.datetime.now().strftime('%Y-%m-%d @ %H:%M:%S')\r\nlog = open('log.txt', 'a') \r\n\r\n#Create dictionary of record upcs + pricing from file\r\nrecords = {}\r\nwith open('records.txt') as f:\r\n for line in f:\r\n line = line.rstrip()\r\n (key, val) = line.split()\r\n records[int(key)] = val\r\n\r\n#Call iTunes API to check pricing on items\r\nprint('Connecting to iTunes...')\r\n\r\nfor key in records:\r\n\r\n #Build query by concatenating url + record id, trigger api call\r\n query = 'https://itunes.apple.com/lookup?id={}'.format(key)\r\n response = json.loads(urllib.request.urlopen(query).read().decode('utf-8'))['results']\r\n\r\n #Parse JSON, compare price\r\n if response:\r\n title = response[0]['collectionName']\r\n old_price = float(records[key])\r\n new_price = float(response[0]['collectionPrice'])\r\n\r\n if old_price > new_price:\r\n print('Change to {}! 
[{} > {}]'.format(title, old_price, new_price))\r\n log.write(timestamp + ': Change to {} [{} > {}]'.format(title, old_price, new_price))\r\n\r\n## sms = MIMEText('iTunes price drop detected!')\r\n## sms['From'] = 'sender@domain.com'\r\n## sms['To'] = 'recipient@domain.com'\r\n##\r\n## s = smtplib.SMTP('smtp.gmail.com', 587)\r\n## s.ehlo()\r\n## s.starttls()\r\n## s.login('username', 'password')\r\n## s.sendmail('sender@domain.com', 'recipient@domain.com', sms.as_string())\r\n## s.quit()\r\n \r\n else:\r\n print('No change to {}'.format(title))\r\n log.write(timestamp + ': No change to {}\\n'.format(title))\r\n else:\r\n log.write(timestamp + ': No match for {}\\n'.format(key)) \r\n\r\nprint('Finalizing...')\r\nlog.write('-' * 25 + '\\n')\r\nlog.close()\r\nf.close()\r\n\r\nprint('Complete!')\r\ntime.sleep(2)\r\n\r\nexit\r\n","sub_path":"itunes_pricewatcher.py","file_name":"itunes_pricewatcher.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"302532811","text":"# -*- coding: utf-8 -*-\n\"\"\"\nVisualize the ensemble streamflow predictions\n\n@author: Travis Williams\n\"\"\"\nfrom sys import platform\nimport copy\nimport dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport glob\nimport json\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nimport scipy\n\nif platform == 'win32':\n from flask_cache import Cache\nelse:\n from flask_caching import Cache\n\n\n# In[] Set up application and server\napp = dash.Dash(__name__)\n\n# The stylesheet is based one of the DASH examples\n# (oil and gas extraction in New York)\napp.css.append_css({'external_url': 'https://rawgit.com/WilliamsTravis/' +\n 'PRF-USDM/master/dash-stylesheet.css'})\n\n# Create server object\nserver = app.server\n\n# Create and initialize a cache for storing data - data pocket\ncache = Cache(config={'CACHE_TYPE': 'simple'})\ncache.init_app(server)\n\n# Create a container for the graphs\nlayout = dict(\n autosize=True,\n height=500,\n font=dict(color='black'),\n titlefont=dict(color='black',\n size='20',\n weight='bold'),\n margin=dict(\n l=35,\n r=35,\n b=65,\n t=55,\n pad=4\n ),\n # hovermode=\"closest\",\n plot_bgcolor=\"white\",\n paper_bgcolor=\"lightblue\",\n legend=dict(font=dict(size=10), orientation='h'))\n\n# In[] Get data -2 sets\nfiles = glob.glob(os.path.join('data', \"*\"))\nfiles = [f for f in files if \"historical\" not in f]\ndolc_hist = pd.read_csv(os.path.join('data', \"DOLC2_historical.csv\")) # use os.path.join() and \"..\" instead of \"data\\\\\"\nmphc_hist = pd.read_csv(os.path.join('data', \"MPHC2_historical.csv\"))\n\ndolc_files = [f for f in files if \"DOLC2\" in f]\nmphc_files = [f for f in files if \"MPHC2\" in f]\n\ndolc_dfs = {f[-8:-4]: pd.read_csv(f) for f in dolc_files}\nyrs = [f[-8:-4] for f in dolc_files]\nyear_options = [{'label': y, 'value': y} for y in yrs]\n\nmphc_dfs = {f[-8:-4]: pd.read_csv(f) 
for f in mphc_files}\n\ndf_dict = {'MPHC2': mphc_dfs,\n 'DOLC2': dolc_dfs}\n\nhist_dict = {'MPHC2': mphc_hist,\n 'DOLC2': dolc_hist}\n\nsite_options = [{'label': \"McPhee Reservoir\", 'value': 'MPHC2'},\n {'label': \"McPhee Reservoir Entry Point at Dolores\",\n 'value': 'DOLC2'}]\n\n# In[] Set up HTML structure\n\napp.layout = html.Div([\n\n html.H2(\"Ensemble Streamflow Predictions at the McPhee Reservoir\",\n style={'text-align': 'center'}),\n\n html.Div(className=\"row\",\n children=[html.Div([dcc.Dropdown(id='site_choice',\n options=site_options,\n placeholder=\"McPhee Reservoir\")],\n className=\"two columns\"),\n\n html.Div([dcc.Dropdown(id=\"year\",\n options=year_options,\n value='2018')],\n className=\"one column\",\n style={'width': '90'})]),\n\n html.Div(className=\"row\",\n children=[html.Div(children=[dcc.Graph(id='cbrfc_graph')],\n className=\"six columns\"),\n\n html.Div(children=[dcc.Graph(id='cbrfc_history')],\n style={'float': 'right'},\n className='six columns')]),\n\n html.Div(className=\"row\",\n children=[html.Div(children=[dcc.Graph(id='err_evolve')],\n className='six columns'),\n html.Div(children=[dcc.Graph(id='uncrtnty_evolve')],\n className='six columns')]),\n\n html.Div(className=\"row\",\n children=[html.Div(children=[dcc.Graph(id='err_evolve_all')],\n className='six columns'),\n html.Div(children=[dcc.Graph(id='uncrtnty_evolve_all')],\n className='six columns')]),\n\n html.Br(),\n\n html.Div(className=\"twelve columns\",\n children=[dcc.Graph(id='our_graph')]),\n\n html.Div(className=\"row\",\n style={'width': '100%',\n 'margin-bottom': '75'},\n children=[\n html.Div(\n className=\"three columns\",\n style={\n 'height': '200',\n 'margin-right': '10',\n 'margin-left': '150'},\n children=[\n html.P('S.D.'),\n html.P(id='sd_output'),\n dcc.Slider(id='sd',\n min=0,\n max=25,\n step=1,\n value=0,\n updatemode='drag',\n vertical=True,\n marks={0: {'label': '0'},\n 25: {'label': '25'}})]),\n\n html.Div(\n className=\"three columns\",\n 
style={'height': '200',\n 'margin-right': '10'},\n children=[\n html.P('S.D. 2'),\n html.P(id='sd_output2'),\n dcc.Slider(id='sd2',\n min=0,\n max=25,\n step=1,\n value=0,\n updatemode='drag',\n vertical=True,\n marks={0: {'label': '0'},\n 25: {'label': '25'}})])\n ]),\n html.Hr(),\n ]) # *END\n\n\n# In[]\n@app.callback(Output('cbrfc_graph', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value')])\ndef makeGraph(year, site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n df = dfs[year]\n df = df[75:295] # January to August\n obs = df['Observed Accumulation'].dropna()\n final = obs.iloc[-1]\n var = round(np.nanvar(df['ESP 50']), 2)\n yaxis = dict(range=[0, 700])\n df['ratio10'] = df['ESP 10'].apply(lambda x: str(round(x/final*100, 2)) + \"%\")\n df['text10'] = df['ESP 10'].astype(str) + \" KAF; \" + df['ratio10']\n df['ratio50'] = df['ESP 50'].apply(lambda x: str(round(x/final*100, 2)) + \"%\")\n df['text50'] = df['ESP 50'].astype(str) + \" KAF; \" + df['ratio50']\n df['ratio90'] = df['ESP 90'].apply(lambda x: str(round(x/final*100, 2)) + \"%\")\n df['text90'] = df['ESP 90'].astype(str) + \" KAF; \" + df['ratio90']\n\n annotation = dict(\n text=\"Forecast Variance: \" + \"{:,}\".format(var) + \"\",\n x=year + '-06-25',\n y=650,\n font=dict(size = 17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='#8ad88d',\n width=2,\n dash=\"dashdot\"),\n x=df.Date,\n y=df['ESP 90'],\n name='p90',\n text=df['text90'],\n hoverinfo='text'),\n dict(type='line',\n line=dict(color='#04a00a',\n width=4),\n x=df.Date,\n y=df['ESP 50'],\n name=\"p50\",\n text=df['text50'],\n hoverinfo='text'\n ),\n dict(type='line',\n line=dict(color='#8ad88d',\n width=2,\n dash=\"dashdot\"),\n x=df.Date,\n y=df['ESP 10'],\n name='p10',\n text=df['text10'],\n hoverinfo='text'),\n dict(type='line',\n line=dict(color='blue',\n width=4),\n x=df.Date,\n y=df['Observed Accumulation'],\n name=\"Observation (KAF)\")]\n\n layout_c = 
copy.deepcopy(layout)\n layout_c['title'] = (\"Colorado Basin River Forecast Center\" +\n \"'s \" + '\"ESP\"' + \" - \" + site_choice + \" \" + year)\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('cbrfc_history', 'figure'),\n [Input('site_choice', 'value')])\ndef makeGraph2(site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n df = hist_dict[site_choice]\n var = round(np.nanvar(df['vol']), 2)\n yaxis = dict(range=[0, 700])\n df['text'] = df['vol'].astype(str) + \"KAF\"\n annotation = dict(\n text=\"Streamflow Variance: \" + \"{:,}\".format(var) + \"\",\n x=2010,\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='blue',\n width=4),\n x=df['year'],\n y=df['vol'],\n text=df['text'],\n hoverinfo='text',\n name=\"Observation (KAF)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" streamflow history \")\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('err_evolve', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value')])\ndef makeGraph3(year, site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n df = dfs[year]\n df = df[75:295] # January to August\n q = df['Observed Total'].dropna().tolist()[0]\n forecasts = np.array(df['ESP 50'])\n errors = abs(forecasts - q)\n errors = np.round(errors, 2)\n mean_err = round(np.nanmean(errors), 2)\n df['text'] = errors\n df['text'] = df['text'].astype(str) + \" KAF\"\n yaxis = dict(range=[0, 700])\n xaxis = 
dict(df['Date'])\n annotation = dict(\n text=\"Mean Absolute Error: \" + \"{:,}\".format(mean_err) + \"\",\n x=df.Date.iloc[-15],\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='red',\n width=5),\n x=df['Date'],\n y=errors,\n text=df['text'],\n hoverinfo='text',\n name=\"Error (p50 - q)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" ESP 50 Absolute Errors \" + year)\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['xaxis'] = xaxis\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('uncrtnty_evolve', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value')])\ndef makeGraph4(year, site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n df = dfs[year]\n df = df[75:295] # January to August\n uncertainties = np.array(df['ESP 10']) - np.array(df['ESP 90'])\n uncertainties = np.round(uncertainties, 2)\n df['text'] = uncertainties\n df['text'] = df['text'].astype(str) + \" KAF\"\n mean_uncert = round(np.nanmean(uncertainties), 2)\n yaxis = dict(range=[0, 700])\n annotation = dict(\n text=\"Average Uncertainty: \" + \"{:,}\".format(mean_uncert) + \"\",\n x=df.Date.iloc[-15],\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='#f4d942',\n width=5),\n x=df.Date,\n y=uncertainties,\n text=df['text'],\n hoverinfo='text',\n name=\"Uncertainty (p10 - p90)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" ESP 90, 10 range \" + year)\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['showlegend'] = True\n\n figure = dict(data=data, 
layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('err_evolve_all', 'figure'),\n [Input('site_choice', 'value')])\ndef makeGraph5(site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n dfs = [dfs[key] for key in dfs.keys()]\n for i in range(len(dfs)):\n dfs[i] = dfs[i][75:295]\n print(len(dfs[i])) # January to August\n\n qs = [df['Observed Total'].dropna().tolist()[0] for df in dfs]\n forecasts = [np.array(df['ESP 50']) for df in dfs]\n errors = [forecasts[i] - qs[i] for i in range(len(qs))]\n errors = np.nanmean(errors, axis=0)\n # errors = abs(forecasts - q)\n errors = np.round(errors, 2)\n mean_err = round(np.nanmean(errors), 2)\n df = dfs[0][['Date', 'Average']]\n # df['Date'] = pd.to_datetime(df['Date'])\n # df['Date'] = df['Date'].map(lambda x: x.strftime('%m-%d'))\n df['day'] = df.index\n df['errors'] = errors\n df['text'] = df['errors'].astype(str) + \" KAF\"\n yaxis = dict(range=[0, 700])\n xaxis = dict(df['Date'])\n annotation = dict(\n text=\"Mean Absolute Error: \" + \"{:,}\".format(mean_err) + \"\",\n x=330,\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='red',\n width=5),\n x=df['day'],\n y=df['errors'],\n text=df['text'],\n hoverinfo='text',\n name=\"Error (p50 - q)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" ESP 50 Absolute Errors - All Years\")\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['xaxis'] = xaxis\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n@app.callback(Output('uncrtnty_evolve_all', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value')])\ndef makeGraph6(year, site_choice):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n dfs = [dfs[key] for key in dfs.keys()]\n for i in 
range(len(dfs)):\n dfs[i] = dfs[i][75:295]\n print(len(dfs[i])) # January to August\n\n uncertainties = [np.array(df['ESP 10']) - np.array(df['ESP 90']) for\n df in dfs]\n uncertainties = np.nanmean(uncertainties, axis=0)\n uncertainties = np.round(uncertainties, 2)\n df = dfs[0][['Date', 'Average']]\n df['uncertainties'] = uncertainties\n df['text'] = df['uncertainties'].astype(str) + \" KAF\"\n df['day'] = df.index\n mean_uncert = round(np.nanmean(uncertainties), 2)\n yaxis = dict(range=[0, 700])\n annotation = dict(\n text=\"Average Uncertainty: \" + \"{:,}\".format(mean_uncert) + \"\",\n x=330,\n y=650,\n font=dict(size=17),\n showarrow=False)\n\n data = [dict(type='line',\n line=dict(color='#f4d942',\n width=5),\n x=df.day,\n y=df.uncertainties,\n text=df['text'],\n hoverinfo='text',\n name=\"Uncertainty (p10 - p90)\")]\n\n layout_c = copy.deepcopy(layout)\n layout_c['title'] = (site_choice + \" ESP 90, 10 range - All Years\")\n layout_c['dragmode'] = 'select'\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['annotations'] = [annotation]\n layout_c['yaxis'] = yaxis\n layout_c['showlegend'] = True\n\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n# In[] Set up application callbacks for our simulation\n@app.callback(Output('our_graph', 'figure'),\n [Input('year', 'value'),\n Input('site_choice', 'value'),\n Input('sd', 'value')])\ndef makeGraph7(year, site_choice, sd):\n if not site_choice:\n site_choice = 'MPHC2'\n dfs = df_dict[site_choice]\n df = dfs[year]\n df = df[75:295] # January to August\n dates = df['Date']\n obs = df['Observed Accumulation'].dropna()\n final = obs.iloc[-1]\n average = list(df['Average'])[0]\n yaxis = dict(range=[0, 700])\n df['ratio50'] = df['ESP 50'].apply(\n lambda x: str(round(x/final*100, 2)) + \"%\")\n\n# Simulation part:\n forecast = np.random.normal(final, sd, len(dates))\n\n df_f = pd.DataFrame({'Date': dates, 'Forecast': forecast})\n data = [dict(type='line',\n 
line=dict(color='#cc872e',\n width=4),\n x=df_f.Date,\n y=df_f.Forecast,\n text=df['ratio50'],\n hovermode='text',\n name='Simulation'),\n dict(\n type='line',\n line=dict(color='blue',\n width=4),\n x=df.Date,\n y=df['Observed Accumulation'],\n name='Observation')]\n\n layout_c = copy.deepcopy(layout)\n layout_c['dragmode'] = 'select'\n layout_c['yaxis'] = yaxis\n layout_c['title'] = 'Our \"ESP\" - ' + site_choice\n layout_c['font'] = dict(color='white'),\n layout_c['titlefont'] = dict(color='white',\n size='20',\n weight='bold')\n layout_c['legend'] = dict(font=dict(size=15),\n orientation='v')\n layout_c['paper_bgcolor'] = '#013589'\n layout_c['showlegend'] = True\n figure = dict(data=data, layout=layout_c)\n\n return figure\n\n\n# In[] Run application\n\n@app.callback(Output('sd_output', 'children'),\n [Input('sd', 'value')])\ndef displaySD(sd):\n return str(sd)\n\n@app.callback(Output('sd_output2', 'children'),\n [Input('sd2', 'value')])\ndef displaySD2(sd):\n return str(sd)\n\n# In[]\n\n\n\n# In[]\n\nif __name__ == '__main__':\n app.run_server(host='0.0.0.0')\n","sub_path":"scripts/vis_app.py","file_name":"vis_app.py","file_ext":"py","file_size_in_byte":19430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"380910402","text":"\n\n# ## Q1. Preparing Dataset\n\n# In[83]:\n\n\nimport pandas as pd\n\n\n# In[3]:\n\n\nCustomer = pd.read_csv(\"Customer.csv\")\nCustomer\n\n\n# In[4]:\n\n\nProduct_hierarchy = pd.read_csv(\"prod_cat_info.csv\")\nProduct_hierarchy\n\n\n# In[5]:\n\n\nTransaction = pd.read_csv(\"Transactions.csv\")\nTransaction\n\n\n# In[6]:\n\n\n# 1. Merging\nCustomer_Trans = pd.merge(left = Customer,\n right = Transaction,\n left_on = 'customer_Id',\n right_on = 'cust_id',\n how = 'inner',\n indicator = True)\n\n\n# In[7]:\n\n\nCustomer_Trans\n\n\n# In[99]:\n\n\nCustomer_Final = pd.merge(left = Customer_Trans,\n right = Product_hierarchy,\n left_on = 'prod_cat_code',\n right_on = 'prod_cat_code',\n how = 'inner'\n )\n\n\n\n# In[100]:\n\n\nCustomer_Final\n\n\n# ## Q2. Summary Report\n\n#\n# ### Column names with their data-types\n#\n\n# In[11]:\n\n\nCustomer_Final.dtypes\n\n\n# ### Top 10 Observations\n\n# In[12]:\n\n\nCustomer_Final.head(10)\n\n\n# ### 10 Bottom Observations\n\n# In[13]:\n\n\nCustomer_Final.tail(10)\n\n\n# ### Five Number Summary\n\n# In[14]:\n\n\nimport numpy as np\nData_min = Customer_Final['total_amt'].min()\nData_max = Customer_Final['total_amt'].max()\nData_q1 = np.percentile(Customer_Final.total_amt,25)\nmedian = np.percentile(Customer_Final.total_amt,50)\nData_q3 = np.percentile(Customer_Final.total_amt,75)\nprint('Min = ',Data_min)\nprint('Max = ',Data_max)\nprint('Median = ',median)\nprint('Q1 = ',Data_q1)\nprint('Q3 = ',Data_q3)\n\n\n# ## Frequency Table :\n#\n# ### Store type\n\n# In[15]:\n\n\nfreq_table = pd.crosstab(index = Customer_Final['Gender'],\n columns = Customer_Final['Store_type'])\nfreq_table.columns = ['TeleShop','MBR','e-shop','Flagshipstore']\nfreq_table.index = ['Male','Female']\nfreq_table\n\n\n# ### Prod_cat\n\n# In[16]:\n\n\nfreq_table = pd.crosstab(index = Customer_Final['Gender'],\n columns = Customer_Final['prod_cat'])\n\nfreq_table.columns = ['Books','Bags','Clothing','Footwear','Electronics','Home 
and kitchen']\nfreq_table.index = ['Male','Female']\nfreq_table\n\n\n# ### Prod_subcat\n\n# In[17]:\n\n\nfreq_table = pd.crosstab(index = Customer_Final['Gender'],\n columns = Customer_Final['prod_subcat'])\nfreq_table.columns = ['Men','Women','Kid','Mobile','Computer','Personal Appliances','Cameras','Audio and video',\n 'Fiction','Academic','Non-fiction','Children','Comics','DIY','Furnishing','Kitchen',\n 'Bath','Tools']\nfreq_table.index = ['Male','Female']\nfreq_table\n\n\n# ## Q3. Histograms for all continuous variables and frequency bars for categorical variables\n\n# ### Histogram for continous variables -\n#\n#\n# ### 1. Tax\n\n# In[20]:\n\n\nimport matplotlib.pyplot as plt\nTax = Customer_Final['Tax']\nplt.hist(Tax,color=['yellow'])\nplt.xlabel('tax')\nplt.ylabel('Frequency')\nplt.show()\n\n\n# ### 2. Total amount\n\n# In[19]:\n\n\nTotal_Amt = Customer_Final['total_amt']\nplt.hist(Total_Amt,color = 'Blue')\nplt.xlabel('Total amount')\nplt.ylabel('Frequency')\nplt.show()\n\n\n# ### Frequency Bar for Categorical variables -\n#\n#\n#\n# ### 1. Gender\n\n# In[21]:\n\n\nCustomer_Final['Gender'].value_counts().plot(kind = 'bar')\n\n\n# ### 2. Store type\n\n# In[22]:\n\n\nCustomer_Final['Store_type'].value_counts().plot(kind = 'bar')\n\n\n# ### 3. Product category\n\n# In[23]:\n\n\nCustomer_Final['prod_cat'].value_counts().plot(kind = 'bar')\n\n\n# ### 4. Product sub category\n\n# In[24]:\n\n\nCustomer_Final['prod_subcat'].value_counts().plot(kind = 'bar')\n\n\n# ## Q4\n#\n# ### A. Time period of the available transaction data\n\n# In[ ]:\n\n\n\n\n\n# ### B. Count number of negative total amount\n\n# In[25]:\n\n\ndf = Customer_Final['total_amt']\ncount2 = Customer_Final.loc[(df<0),['total_amt']].count()\ncount2\n\n\n# ## Q5. 
Analyze which product categories are more popular among females vs male customers.\n\n# In[134]:\n\n\n# Popular among Male\nM = Customer_Final.loc[Customer_Final['Gender']=='M']\n\ngroup_prod = M.groupby(['prod_cat'])['total_amt'].sum()\npopular_M = group_prod.nlargest(1)\ndisplay('The most popular product category in Male customers is : ',popular_M)\n\n# Popular among Female\nF = Customer_Final.loc[Customer_Final['Gender']=='F']\ngroup_prod1 = F.groupby(['prod_cat'])['total_amt'].sum()\npopular_F = group_prod1.nlargest(1)\ndisplay('The most popular product category in Female customers is : ',popular_F)\n\n\n# #### Among Male vs Female the most popular product category is Books.\n\n# ## Q6. Which City code has the maximum customers and what was the percentage of customers from that city?\n\n# In[173]:\n\n\nmax_cust = Customer['city_code'].value_counts()\nt = max_cust.nlargest(1)\n\ndisplay(\"City code which has Maximum customers is : \",t)\n\n#percentage of customers from city code 3\ntot_customer = Customer['customer_Id'].count()\npercent = round((595/tot_customer)*100,2)\nprint(\"Percentage of customers from the city code 3 is {}% : \".format(percent))\n\n\n# ## Q7. Which store type sells the maximum products by value and by quantity?\n\n# In[24]:\n\n\nsort_list = Customer_Final.sort_values(['total_amt','Qty'],ascending = False)\ndisplay(sort_list.head(1)['Store_type'])\n\n\n# ## Q8. What was the total amount earned from the \"Electronics\" and \"Clothing\" categories from\n#Flagship Stores?\n\n# In[32]:\n\n\ndf = pd.DataFrame(Customer_Final)\ntf = df[df.prod_cat.isin(['Electronics','Clothing']) & (df.Store_type == 'Flagship store')]\ntotal = tf.total_amt.sum()\nprint('Total amount earned',total)\n\n\n# ## Q9. What was the total amount earned from \"Male\" customers under the \"Electronics\" category?\n\n# In[52]:\n\n\ntf1 = df[(df.Gender == 'M') & (df.prod_cat == 'Electronics')]\ntotal = tf1.total_amt.sum()\nprint('Total amount earned',total)\n\n\n# ## Q10. 
How many customers have more than 10 unique transactions, after removing all transactions which have any negative amounts?\n\n# In[46]:\n\n\ndf1 = df[(df.total_amt > 0)]\nts = df1.transaction_id.nunique()\nprint('Total customers having more than 10 unique transactions are - ',ts)\n\n\n# ## Q11. For all customers aged between 25 - 35, find out:\n#\n#\n# ### a. What was the total amount spent for “Electronics” and “Books” product categories?\n\n# In[47]:\n\n\ncurr_year = pd.to_datetime('today').year\ndob_year = pd.DatetimeIndex(df['DOB']).year #extract year from DOB\n\nx = dob_year-100 # for the years which belongs to 60's\nv = curr_year - x\ny = curr_year - dob_year\ndf['age'] = (np.where(dob_year > curr_year,v,y))\ndf\n\n\n# In[174]:\n\n\ntotal = df.loc[((df.age >25) & (df.age <35)) & ((df.prod_cat=='Books') | (df.prod_cat=='Electronics'))]['total_amt'].sum()\nprint('Total amount spent',total)\n\n\n# ### b. What was the total amount spent by these customers between 1st Jan, 2014 to 1st Mar, 2014?\n\n# In[92]:\n\n\nCustomer_Final['tran_date'] = pd.to_datetime(Customer_Final['tran_date'])\n\nt_date = Customer_Final[(Customer_Final['tran_date'] > '2014-01-01') & (Customer_Final['tran_date'] < '2014-03-01')]\ntotal_amount = t_date.total_amt.sum()\nprint('Total amount spent by the customer - ',total_amount)\n\n\n","sub_path":"Retail Case study/Retail_Case_Study.py","file_name":"Retail_Case_Study.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"461239526","text":"from torch import nn\nimport torch.nn.functional as F\n\n\nclass MLP(nn.Module):\n \"\"\"\n Linear (256) -> ReLU -> Dropout-> Linear(64) -> ReLU -> Dropout -> Linear(10) -> ReLU-> LogSoftmax\n \"\"\"\n\n def __init__(self, l1=256, l2=64, dr=.25):\n super().__init__()\n self.fc1 = nn.Linear(784, l1)\n self.fc2 = nn.Linear(l1, l2)\n self.fc3 = nn.Linear(l2, 10)\n\n # Define proportion or neurons to dropout\n self.dropout = nn.Dropout(dr)\n\n def forward(self, x):\n x = x.view(x.shape[0], -1)\n x = F.relu(self.fc1(x))\n # Apply dropout\n x = self.dropout(x)\n x = F.relu(self.fc2(x))\n # Apply dropout\n x = self.dropout(x)\n x = F.relu(self.fc3(x))\n x = F.log_softmax(x, dim=1)\n\n return x\n","sub_path":"mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"369952034","text":"from socket import *\n\ndef main():\n host = 'localhost'\n\n sock = socket(AF_INET6, SOCK_STREAM)\n addr = (host,9898)\n sock.connect(addr)\n\n try:\n msg = b\"This was a terrible test!\\n\"\n sock.sendall(msg)\n except socket.errno as e:\n print(\"Socket error \", e)\n finally:\n sock.close()\n\nmain()","sub_path":"LABS/Socket/client6.py","file_name":"client6.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"186319766","text":"import pytest\r\nfrom flask import Flask\r\nfrom jinja2 import Environment, DictLoader\r\nfrom latexcreator import Api\r\n\r\nclass TestConfig:\r\n TESTING = True\r\n\r\n@pytest.fixture\r\ndef app():\r\n\r\n _app = Flask('test')\r\n _app.config.from_object(TestConfig)\r\n ctx = _app.test_request_context()\r\n ctx.push()\r\n\r\n yield _app\r\n \r\n ctx.pop()\r\n \r\n@pytest.yield_fixture\r\ndef client(app):\r\n with app.test_client() as client:\r\n yield client\r\n \r\n@pytest.fixture\r\ndef api():\r\n return Api()\r\n \r\n@pytest.fixture\r\ndef default_environment():\r\n templates = {'a':'a:{{ a }}','b':'b:{{ b }}, a:{{ a }}'}\r\n return Environment(loader=DictLoader(templates)),templates","sub_path":"tests/test_conf.py","file_name":"test_conf.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"186788950","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------\n# ファイル名 :rirakkuma.py\n# 機能概要 :2019ROBOCON(予選用)\n# ・move_baseを使用\n# ・スタート/ゴール地点は\"Start_Goal.csv\"ファイルで設定する\n# 作成日時 :2019/08/19\n# -----------------------------------------------------------------------\n\n# Import\n# common\nimport rospy\nimport math\n# move_base\nfrom move_base_msgs.msg import MoveBaseActionResult\nfrom geometry_msgs.msg import PoseStamped\n# Twist\nfrom geometry_msgs.msg import Twist\n# file\nimport csv # csv file\nimport os # file path\n# euler to quaternio\nimport tf\nfrom geometry_msgs.msg import Quaternion\n\n# Add ImageProcessing --- START ---\n# use LaserScan\nfrom sensor_msgs.msg import LaserScan\n\n# use Camera\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport numpy as np\n\n# Image Process function\nimport imgProc #function\nfrom imgProc import * #class\n\n# Add ImageProcessing --- END ---\n\nimport math\nfrom tf import TransformListener\nfrom geometry_msgs.msg import PointStamped\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nimport actionlib\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\n\n#camera_fov = 50.0\n#camera_width = 640.0\n\n\n# PythonでEnum的なことを実現\nclass MainState():\n STOP = 0 # 停止\n EXEC_ACTION = 1 # アクション実行\n MOVING = 2 # 移動\n HUNTING = 3 # 追跡\n\nclass RirakkumaBot():\n # クラス変数\n HUNT_CNT_NUM = 0 # HUNTING(追跡)状態に遷移する待ち回数 (2020.08.18 Wait無効)\n \n def __init__(self, bot_name=\"NoName\"):\n ### Parameter Settings\n # bot name \n self.name = bot_name\n\n # State\n self.main_state = MainState.STOP # メイン状態\n self.prev_main_state = MainState.STOP # 前回メイン状態\n self.next_state = MainState.STOP # 次状態\n # CSV ファイルから取り出したデータ保存用リスト\n self.c_data = [] # csvデータ\n self.c_data_cnt = 0 # csvデータ順次取得のためのカウンタ\n # simple/goal用のシーケンス番号 ※これ無いとエラーになるため必要\n self.goal_seq_no = 0\n # HUNTING(追跡)移行カウンタ\n self.hunting_cnt = 
0\n\n # Flags\n # 初期化フラグ\n self.initialize_flg = False\n # ゴール到着フラグ\n self.goal_arrival_flg = False\n\n ### Publisher を ROS Masterに登録\n # Velocity\n self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)\n self.pub_goal = rospy.Publisher('move_base_simple/goal', PoseStamped, queue_size=1, latch=True)\n ### Subscriber を ROS Masterに登録\n self.sub_goal_result = rospy.Subscriber(\"move_base/result\", MoveBaseActionResult, self.result_callback, queue_size=1)\n \n # Add ImageProcessing --- START ---\n # lidar scan subscriber\n self.scan = LaserScan()\n self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)\n\n # camera subscribver\n # for convert image topic to opencv obj\n self.img = None\n self.camera_preview = True\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)\n #self.image_sub = rospy.Subscriber('/red_bot/image_raw', Image, self.imageCallback)\n\n #cImgProc instance\n self.proc = cImgProc()\n # Add ImageProcessing --- END ---\n\n self.client = actionlib.SimpleActionClient('move_base',MoveBaseAction)\n\n def calcTwist_center(self, center, depth, S):\n \"\"\"目標が中心になるようTwistの値を設定する\"\"\"\n #depth [m]\n if center != -1:\n val = int(center / 16) #centerを0-4の10段階に\n # --- 近距離 --------------------------------\n if 0.3 > depth:\n #if 100 < S: \n x = -0.2\n th = 0.0\n # --- 中距離 --------------------------------\n elif 0.6 > depth:\n if val == 4:\n x = 0.0\n th = -0.2\n\n elif val == 3:\n x = 0.1\n th = -0.1\n\n elif val == 2:\n x = 0.0\n th = 0.0\n\n elif val == 1:\n x = 0.1\n th = 0.1\n\n else:\n x = 0.0\n th = 0.2\n # --- 遠距離 --------------------------------------- \n #elif 1.0 > depth:\n else : \n if val == 4:\n x = 0.0\n th = -0.2\n\n elif val == 3:\n x = 0.1\n th = -0.1\n\n elif val == 2:\n x = 0.15\n th = 0.0\n\n elif val == 1:\n x = 0.1\n th = 0.1\n\n else:\n x = 0.0\n th = 0.2\n # else:\n # x=0.0\n # th=0.0\n # --- no detect green\n else :\n x = 0\n th = 0 \n\n # 更新\n print(\"blue detect 
x,th=\", x, th)\n twist = Twist()\n twist.linear.x = x; twist.linear.y = 0; twist.linear.z = 0\n twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th\n return twist\n\n def csv_data(self):\n \"\"\"CSVファイルから座標を取得する\"\"\"\n # csvファイルをOpen\n csv_pass = os.path.dirname(__file__) + \"/position_list.csv\"\n csv_file = open(csv_pass, \"r\")\n # データ読み込み\n pos_data = csv.reader(csv_file, delimiter=\",\", doublequote=True, lineterminator=\"\\r\\n\", quotechar='\"', skipinitialspace=True)\n # 最初の一行をヘッダーとして取得\n header = next(pos_data)\n # 各行のデータを抜き出し\n for row in pos_data:\n # データ保存用のリストにcsvファイルから取得したデータを保存する\n # appendでリストに別のリストとして要素を追加する\n self.c_data.append(row)\n\n def vel_ctrl(self, line_x, line_y, ang_z):\n \"\"\"publisher:cmd_vel Topic用(旋回で使用)\"\"\"\n vel_msg = Twist()\n vel_msg.linear.x = line_x\n vel_msg.linear.y = line_y\n vel_msg.angular.z = ang_z\n self.vel_pub.publish(vel_msg)\n\n def simple_goal_publish(self,pos_list):\n \"\"\"publisher:move_base_simple/goal Topic用(引数はリスト型で渡す)\"\"\"\n # Goal Setting\n goal = PoseStamped()\n goal.header.seq = self.goal_seq_no\n goal.header.frame_id = \"map\" # mapで座標系で指定する\n goal.header.stamp = rospy.Time.now() # タイムスタンプは今の時間\n\n self.goal_seq_no += 1 # シーケンス番号を更新\n\n # ** 位置座標\n goal.pose.position.x = float(pos_list[1])\n goal.pose.position.y = float(pos_list[2])\n goal.pose.position.z = 0\n # ** 回転方向\n # 度数をラジアンに変換\n degree_val = float(pos_list[3])\n radian_val = math.radians(degree_val)\n # オイラー角をクォータニオンに変換\n # RESPECT @hotic06 オイラー角をクォータニオンに変換・設定する\n quate = tf.transformations.quaternion_from_euler(0.0, 0.0, radian_val)\n goal.pose.orientation.x = quate[0]\n goal.pose.orientation.y = quate[1]\n goal.pose.orientation.z = quate[2]\n goal.pose.orientation.w = quate[3]\n # debug\n print(goal)\n # 実際にTopicを配信する\n self.pub_goal.publish(goal)\n\n def result_callback(self,goal_result):\n \"\"\"call back:move base result (ゴール座標到着検知)\"\"\"\n if goal_result.status.status == 3: # ゴールに到着\n self.goal_arrival_flg = True\n\n def 
lidarCallback(self, data):\n \"\"\"call back:lider\"\"\"\n self.scan = data\n\n def imageCallback(self, data):\n \"\"\"call back:camera image \"\"\"\n # comvert image topic to opencv object\n try:\n self.img = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n # image processing\n # liderCallbackより先にimageCallbackがコールされIndexError例外に対応\n try:\n self.proc.imageProcess1(self.img, self.scan)\n #print('cwd=', self.proc.cwd)\n except IndexError as e:\n print(e)\n return\n\n # Show camera window\n if self.proc.debug_view == 1: \n cv2.imshow(\"Camera\", self.proc.img_div2) \n cv2.waitKey(1)\n\n # Show debug window\n if self.proc.debug_view == 2:\n #cv2.imshow(\"rila\", self.proc.rila_img)\n #cv2.imshow(\"div2\", self.proc.img_div2) \n #cv2.imshow(\"div8\", self.proc.img_div8)\n #cv2.imshow(\"red\", self.proc.red_img)\n #cv2.imshow(\"green\", self.proc.green_img)\n #cv2.imshow(\"blue\", self.proc.blue_img) \n #cv2.imshow(\"Camera\", self.proc.img) \n cv2.imshow(\"debug1\", self.proc.debug1_img) \n # --- add T.Ishigami 2020.03.15 22:40 ---\n # Add vertical window position for QVGA ( 960 - 260 = 700)\n # cv2.moveWindow(\"debug1\", 0, 700)\n # Add vertical window position for RHC ( 900 - 260 = 640)\n cv2.moveWindow(\"debug1\", 0, 640)\n\n cv2.waitKey(1)\n # green_index = self.proc.green_center\n # if green_index != -1:\n # green_distance = self.proc.depth_img.item(0,green_index,0)\n # else:\n # green_distance = 0\n\n\n def is_start_hunting(self):\n \"\"\"HUNTING(追跡)を開始するか判定する\n\n ・敵を発見したら「HUNT_CNT_NUM」回数待つ\n ・待ち回数を満たしたら現状態を保持して、次状態をHUNTING(追跡)にする\n \n 戻り値 True:開始する/False:開始しない\n \"\"\"\n if self.proc.green_center != -1:\n if self.hunting_cnt >= RirakkumaBot.HUNT_CNT_NUM:\n self.prev_main_state = self.main_state\n self.next_state = MainState.HUNTING\n return True\n else:\n self.hunting_cnt += 1\n else:\n self.hunting_cnt = 0\n\n return False\n\n def is_finish_hunting(self):\n \"\"\"HUNTING(追跡)を終了するか判定する\n\n ・敵を喪失したらHUNTING(追跡)状態に遷移する前状態に戻る\n \n 
戻り値 True:終了する/False:終了しない\n \"\"\"\n if self.proc.green_center == -1:\n self.next_state = self.prev_main_state\n return True\n\n return False\n\n def func_state_stop(self):\n \"\"\"状態処理関数:STOP(停止)\"\"\"\n # 初期処理未実施なら、次状態はEXEC_ACTION\n if self.initialize_flg == False:\n self.initialize_flg = True\n self.next_state = MainState.EXEC_ACTION\n\n def func_state_exec_action(self):\n \"\"\"状態処理関数:EXEC_ACTION(アクション実行)\"\"\"\n # HUNTING(追跡)を開始するか判定する\n if self.is_start_hunting():\n # 開始する場合、以降の処理はしない\n return\n\n # アクションリストを読み込み\n pos_info = self.c_data[self.c_data_cnt]\n self.c_data_cnt += 1 \n # アクションリストに基づいてアクション\n if pos_info[0] == \"move\":\n # 目的地に移動 (次状態はMOVING)\n self.simple_goal_publish(pos_info)\n self.next_state = MainState.MOVING\n elif pos_info[0] == \"turn\": \n # 旋回 (状態維持)\n # 度数をラジアンに変換\n degree_val = float(pos_info[3])\n radian_val = math.radians(degree_val)\n self.vel_ctrl(0,0,radian_val)\n else:\n # 意図しないアクションの場合は次のリスト\n pass\n\n def func_state_moving(self):\n \"\"\"状態処理関数:MOVING(移動)\"\"\"\n # HUNTING(追跡)を開始するか判定する\n if self.is_start_hunting():\n # 開始する場合、以降の処理はしない\n return\n\n # 目的地に到着したら、次状態はEXEC_ACTION\n if self.goal_arrival_flg == True:\n self.goal_arrival_flg = False\n self.next_state = MainState.EXEC_ACTION\n\n def func_state_hunting(self):\n \"\"\"状態処理関数:HUNTING(追跡)\"\"\"\n # HUNTING(追跡)を終了するか判定する\n if self.is_finish_hunting():\n # 終了する場合、以降の処理は実施しない\n return\n\n # 敵の追跡を実行\n print(\"detect green\")\n self.client.cancel_goal()\n twist = self.calcTwist_center(self.proc.green_center, self.proc.green_center_depth, self.proc.green_center_S)\n print(\"#################### green_S_depth ####################\")\n print(self.proc.green_center_S, \"-\", self.proc.green_center_depth)\n print(\"#######################################################\") \n self.vel_pub.publish(twist)\n print(\"snipe_enemy\")\n\n def strategy(self):\n \"\"\"ロボット動作メイン処理(ステートマシンで制御)\"\"\"\n while not rospy.is_shutdown():\n # メイン状態処理を行う\n if self.main_state == MainState.STOP:\n # 
停止\n self.func_state_stop()\n elif self.main_state == MainState.EXEC_ACTION:\n # アクション実行\n self.func_state_exec_action()\n elif self.main_state == MainState.MOVING:\n # 移動\n self.func_state_moving()\n elif self.main_state == MainState.HUNTING:\n # 追跡\n self.func_state_hunting()\n else:\n pass\n\n # DEBUG Print\n print('main_state = ',self.main_state)\n print('next_state = ',self.next_state)\n\n # メイン状態を次の状態に更新\n self.main_state = self.next_state\n # 1秒Wait\n rospy.sleep(1)\n\nif __name__ == \"__main__\":\n rospy.init_node('rirakkuma_node')\n bot = RirakkumaBot('rirakkuma')\n bot.csv_data()\n rospy.sleep(1.0) # 起動後、ウェイト(調整値)\n bot.strategy()","sub_path":"burger_war_dev/scripts/rirakkuma.py","file_name":"rirakkuma.py","file_ext":"py","file_size_in_byte":15105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"97337782","text":"import numpy as np\nimport scipy.stats as ss\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom sklearn.cluster import KMeans\n\n\n# np.random.seed(6)\n\n\"\"\"initialize randomly distributed points in square\"\"\"\nsl = 2 # side length of square\nrawdata = sl*np.random.random((1000, 2))-np.array([[sl/2., sl/2.]])\nn_pts = 150\nn_sel = 100\ndof = 2 # number of spatial dimensions\nrawdata = rawdata[:n_pts, :]\n\n\"\"\"plot intial points\"\"\"\nfig = plt.figure()\nplt.scatter(rawdata[:, 0], rawdata[:, 1],\n marker='o', s=20,\n color='k', linewidths=0.0, edgecolors=None, alpha=.3,\n label='original')\n\n\n\"\"\"perform kmeans to identify seeds\"\"\"\nkmeans = KMeans(n_clusters=n_sel).fit(rawdata)\nseeds = kmeans.cluster_centers_\n\nplt.scatter(seeds[:, 0], seeds[:, 1],\n marker='s', s=15,\n color='b', linewidths=0.0, edgecolors=None, alpha=.5,\n label='targets')\n\n\n\"\"\"find the point closest to each seed\"\"\"\nrawdata_ = rawdata\nselected = np.zeros((n_sel, 2))\n\nfor ii in xrange(n_sel):\n dist = np.sum((rawdata_-seeds[ii, :])**2, 1)\n indx = np.argmin(dist)\n\n selected[ii, :] = rawdata_[indx, :]\n rawdata_ = np.delete(rawdata_, indx, axis=0)\n\n x = np.array([seeds[ii, 0], selected[ii, 0]])\n y = np.array([seeds[ii, 1], selected[ii, 1]])\n plt.plot(x, y, 'r:')\n\n\n\"\"\"plot the selected points\"\"\"\nplt.scatter(selected[:, 0], selected[:, 1],\n marker='x', s=40, c='r', edgecolors=None,\n linewidths=1.0, alpha=0.5,\n label='selected')\n\n\ntgt = 0.5\nplt.axis(tgt*np.array([-sl, sl, -sl, sl]))\n\nplt.axes().set_aspect('equal')\nplt.legend(loc='upper right', shadow=True, fontsize='medium', ncol=1)\nfig.tight_layout()\n\nplt.show()\n","sub_path":"fip_collab/2017_02_08_HCF_pearson/toy_problem/test_select_spread.py","file_name":"test_select_spread.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"281583671","text":"#!/usr/bin/env python\n\nimport sys, logging.config, json\nfrom bottle import route, post, error, run, template, static_file, request, response\nimport constant, config, operation, program\n\nlogger = logging.getLogger(__name__)\n\n####\n\n@error(404)\ndef error404(error):\n\treturn \"404 not found.\"\n\n@route('/static/')\ndef static(filename):\n\treturn static_file(filename, root='./static')\n\n@route('/')\ndef index():\n\treturn template(\n\t\t'main',\n\t)\n\n####\n\n@route('/getProgramSequence')\ndef getProgramSequence():\n\ttry:\n\t\thtml = ''\n\t\tfor op in program.getSequence():\n\t\t\thtml += template(\n\t\t\t\t'operation/disp/' + op.getType(),\n\t\t\t\toperation = op,\n\t\t\t)\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/addOperation')\ndef addOperation():\n\ttry:\n\t\ttype_ = request.params['type']\n\t\top = operation.createByType(type_)\n\t\tprogram.addOperation(op)\n\t\thtml = template(\n\t\t\t'operation/edit/' + op.getType(),\n\t\t\toperation = op,\n\t\t)\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/saveOperation')\ndef saveOperation():\n\ttry:\n\t\tid_ = request.params['id']\n\t\top = program.operationById(id_)\n\t\top.save(request.params)\n\t\thtml = template(\n\t\t\t'operation/disp/' + op.getType(),\n\t\t\toperation = op,\n\t\t)\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/editOperation')\ndef editOperation():\n\ttry:\n\t\tid_ = request.params['id']\n\t\top = program.operationById(id_)\n\t\thtml = template(\n\t\t\t'operation/edit/' + op.getType(),\n\t\t\toperation = op,\n\t\t)\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/moveOperation')\ndef moveOperation():\n\ttry:\n\t\tid_ = request.params['id']\n\t\tupDown = request.params['upDown']\n\t\tres = 
program.moveOperationById(id_, upDown)\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/deleteOperation')\ndef deleteOperation():\n\ttry:\n\t\tid_ = request.params['id']\n\t\top = program.removeOperationById(id_)\n\t\tdel op\n\t\thtml = ''\n\t\treturn json.dumps({'html' : html})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n####\n\n@route('/startProgram')\ndef startProgram():\n\ttry:\n\t\tres = program.start()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/stopProgram')\ndef stopProgram():\n\ttry:\n\t\tres = program.stop()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/isProgramRunning')\ndef isProgramRunning():\n\ttry:\n\t\tres = program.isRunning()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n@route('/getProgramLog')\ndef getProgramLog():\n\ttry:\n\t\tres = program.getLog()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n@route('/eraseProgramLog')\ndef eraseProgramLog():\n\ttry:\n\t\tres = program.eraseLog()\n\t\treturn json.dumps({'result' : res})\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\n####\n\n@route('/dumpProgram')\ndef dumpProgram():\n\ttry:\n\t\tdic = program.dump()\n\t\treturn json.dumps({'result' : dic})\n\texcept Exception as e:\n\t\treturn str(type(e)) + ' ' + e.message\n\n@post('/restoreProgram')\ndef restoreProgram():\n\ttry:\n\t\tinfo = request.files.get('file')\n\t\tif info.filename.endswith('.json'):\n\t\t\traw = info.file.read()\n\t\t\tdata = json.loads(raw)\n\t\t\tprogram.restore(data)\n\t\telse:\n\t\t\traise RuntimeError('file suffix should be : .json')\n\texcept Exception as e:\n\t\treturn __exceptionResponse(e)\n\treturn getProgramSequence()\n\n####\n\ndef __exceptionResponse(e):\n\treturn 
json.dumps({'error' : str(type(e)) + ' ' + e.message})\n\n####\n\ndef __startWebServer():\n\trun(server=\"tornado\", host=config.my_host, port=config.my_port, quiet=False, reloader=False)\n\ndef main(argv):\n\tlogger.debug('starting tester ...')\n\t__startWebServer()\n\n####\n\nif __name__ == \"__main__\":\n\tlogging.config.fileConfig(\"conf/logger.conf\", disable_existing_loggers=False)\n\tmain(sys.argv[1:])\n","sub_path":"startTester.py","file_name":"startTester.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"391715890","text":"# Copyrght(C) 2013 Huangtao\r\n# 项目:营运管理平台\r\n# 模块:导出印鉴卡密码模块\r\n# 作者:黄涛\r\n# 创建:2013-6-29\r\n\r\nfrom .basefrm import TransFrame\r\nclass SigExport(TransFrame):\r\n\r\n sql=\"select CardNo,Pwd from SigCard where CardNo\"\\\r\n \" between %s and %s order by CardNo\"\r\n \r\n initpath='D:/huangtao/Documents/工作平台/业务报表/防伪系统文件/下发/'\r\n \r\n def init(self):\r\n self['Encrypt']='1'\r\n self.branch_list['values']=\" \".join(\\\r\n self.query_list('select BranchName from branch '\\\r\n 'where Level in(1,2) order by brorder'))\r\n \r\n def process(self,cur):\r\n filename=self.initpath+'%s-(%s-%s).dat'%(self.fh,self.b,self.e)\r\n with open(filename,'w') as f: \r\n f.write('\\n\\n')\r\n if self['Encrypt']=='1':\r\n for CardNo,Pwd in cur:\r\n f.write(\" %s= %s=\\n\"\\\r\n %(encrypt(CardNo),encrypt(Pwd)))\r\n else:\r\n for CardNo,Pwd in cur:\r\n f.write(\"%s%s\\n\"%(CardNo,Pwd))\r\n self.aff_rows=cur.rowcount\r\n \r\n def submit(self):\r\n self.b=self['beginno'].zfill(8)\r\n self.e=self['endno'].zfill(8)\r\n self.fh=self['branch']\r\n if self.e\"] = 8310575403 #开始的的个数 \n\n self.word2_dict = {} #记录概率,2-gram\n self.word2_dict_count = {} #记录词频,2-gram\n\n\n self.gmax_word_length = 0\n self.all_freq = 0 #所有词的词频总和,1-gram的\n\n #估算未出现的词的概率,根据beautiful data里面的方法估算\n def get_unkonw_word_prob(self, word):\n return math.log(10./(self.all_freq*10**len(word)))\n\n #获得片段的概率\n def get_word_prob(self, word):\n if word in self.word1_dict: #如果字典包含这个词\n prob = self.word1_dict[word]\n else:\n prob = self.get_unkonw_word_prob(word)\n return prob\n\n\n #获得两个词的转移概率\n def get_word_trans_prob(self, first_word, second_word):\n trans_word = first_word + \" \" + second_word\n #print trans_word\n if trans_word in self.word2_dict_count:\n trans_prob = \\\n math.log(self.word2_dict_count[trans_word]/self.word1_dict_count[first_word])\n else:\n trans_prob = self.get_word_prob(second_word)\n return trans_prob\n\n #寻找node的最佳前驱节点\n #方法为寻找所有可能的前驱片段\n def get_best_pre_node(self, 
sequence, node, node_state_list):\n #如果node比最大词长小,取的片段长度以node的长度为限\n max_seg_length = min([node, self.gmax_word_length])\n pre_node_list = [] #前驱节点列表\n #获得所有的前驱片段,并记录累加概率\n for segment_length in range(1,max_seg_length+1):\n segment_start_node = node-segment_length\n segment = sequence[segment_start_node:node] #获取片段\n\n pre_node = segment_start_node #取该片段,则记录对应的��驱节点\n\n if pre_node == 0:\n #如果前驱片段开始节点是序列的开始节点,\n #则概率为转移到当前词的概率\n #segment_prob = self.get_word_prob(segment)\n segment_prob = \\\n self.get_word_trans_prob(\"\", segment)\n else: #如果不是序列开始节点,按照二元概率计算\n #获得前驱片段的前一个词\n pre_pre_node = node_state_list[pre_node][\"pre_node\"]\n pre_pre_word = sequence[pre_pre_node:pre_node]\n segment_prob = \\\n self.get_word_trans_prob(pre_pre_word, segment)\n\n pre_node_prob_sum = node_state_list[pre_node][\"prob_sum\"] #前驱节点的概率的累加值\n\n #当前node一个候选的累加概率值\n candidate_prob_sum = pre_node_prob_sum + segment_prob\n\n pre_node_list.append((pre_node, candidate_prob_sum))\n\n #找到最大的候选概率值\n (best_pre_node, best_prob_sum) = \\\n max(pre_node_list,key=lambda d:d[1])\n return (best_pre_node, best_prob_sum)\n\n #最大概率分词\n def mp_seg(self, sequence):\n sequence = sequence.strip()\n\n #初始化\n node_state_list = [] #记录节点的最佳前驱,index就是位置信息\n #初始节点,也就是0节点信息\n ini_state = {}\n ini_state[\"pre_node\"] = -1 #前一个节点\n ini_state[\"prob_sum\"] = 0 #当前的概率总和\n node_state_list.append( ini_state )\n #字符串概率为2元概率\n #P(a b c) = P(a|)P(b|a)P(c|b)\n\n #逐个节点寻找最佳前驱节点\n for node in range(1,len(sequence) + 1):\n #寻找最佳前驱,并记录当前最大的概率累加值\n (best_pre_node, best_prob_sum) = \\\n self.get_best_pre_node(sequence, node, node_state_list)\n\n #添加到队列\n cur_node = {}\n cur_node[\"pre_node\"] = best_pre_node\n cur_node[\"prob_sum\"] = best_prob_sum\n node_state_list.append(cur_node)\n #print \"cur node list\",node_state_list\n\n # step 2, 获得最优路径,从后到前\n best_path = []\n node = len(sequence) #最后一个点\n best_path.append(node)\n while True:\n pre_node = node_state_list[node][\"pre_node\"]\n if pre_node == -1:\n break\n node = 
pre_node\n best_path.append(node)\n best_path.reverse()\n\n # step 3, 构建切分\n word_list = []\n for i in range(len(best_path)-1):\n left = best_path[i]\n right = best_path[i + 1]\n word = sequence[left:right]\n word_list.append(word)\n\n seg_sequence = DELIMITER.join(word_list)\n return seg_sequence\n\n #加载词典,为词\\t词频的格式\n def initial_dict(self, gram1_file, gram2_file):\n #读取1_gram文件\n dict_file = open(gram1_file, \"r\")\n for line in dict_file:\n sequence = line.strip()\n key = sequence.split('\\t')[0]\n value = float(sequence.split('\\t')[1])\n self.word1_dict_count[key] = value\n #计算频率\n self.all_freq = sum(self.word1_dict_count.values()) #所有词的词频\n self.gmax_word_length = max(len(key) for key in self.word1_dict_count.keys())\n self.gmax_word_length = 20\n self.all_freq = 1024908267229.0\n #计算1gram词的概率\n for key in self.word1_dict_count:\n self.word1_dict[key] = math.log(self.word1_dict_count[key]/self.all_freq)\n\n #读取2_gram_file,同时计算转移概率\n dict_file = open(gram2_file, \"r\")\n for line in dict_file:\n sequence = line.strip()\n key = sequence.split('\\t')[0]\n value = float(sequence.split('\\t')[1])\n first_word = key.split(\" \")[0]\n second_word = key.split(\" \")[1]\n self.word2_dict_count[key] = float(value)\n if first_word in self.word1_dict_count:\n self.word2_dict[key] = \\\n math.log(value/self.word1_dict_count[first_word]) #取自然对数\n else:\n self.word2_dict[key] = self.word1_dict[second_word]\n#test\nif __name__=='__main__':\n myseg = DNASegment()\n myseg.initial_dict(\"./words_counter.txt\",\"./words_counter_2.txt\")\n with open('./mark_no_same.txt', mode = 'r', encoding = 'utf-8') as f:\n for line in f:\n sequence = line\n seg_sequence = myseg.mp_seg(sequence)\n print(\"original sequence: \" + sequence + \"segment result: \" + seg_sequence + 
'\\n')\n","sub_path":"Python/Tools/ngram/ngram_segment.py","file_name":"ngram_segment.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"88256839","text":"##### Programme n°2\n\n# On importe les modules qui vont nous permettre de traiter les données\n\n# matplotlib pour réaliser les graphiques\nimport matplotlib.pyplot as pyplot\n# csv pour lire les fichiers de données\nimport csv\n\n###############################################################################\"\n# Fonction de chargement des donnnées\n# Description : Charge un fichier au format suivant :\n# - la premiére ligne est la ligne d'entete\n# - la première colonne correspond aux données de l'axe x\n# param entrant : file : string : Nom du fichier à charger\n# param sortant : dictionnaire contenant les données\n# - clé : 0 à n-1, n étant le nb de colonne dans le fichier\n# - donnée associée : liste contenant les données en colonnne\n# le premier élément de la liste contient l'entête de colonne \ndef load_file(fSource) :\n dicData = {} # Dictionnaire renvoyé\n for strLine in open(fSource,\"r\").readlines(): # Boucle sur les lignes du fichier \n lstLine = strLine.rstrip('\\n').split(';')\n for i in range(0,len(lstLine)) : # Boucle sur les colonnes du fichier\n try : \n dicData[i].append(float(lstLine[i]))\n except : # initialisation du premier élément de la liste\n dicData[i] = [lstLine[i]] # pour la 1 valeur, le type associé n'est pas connu \n return dicData\n\n###############################################################################\"\n# Fonction affichage d'un graphe\n# param entrant : intGraph : integer : référence du graph\n# dicData : dinctionnary : dictionnaire des données comparatives\n# dicMesure : dinctionnary : dictionnaire des données comparatives\n# strLab_y : string : libelle axe y\n# strLab_leg : string : libelle de la legende\n# strLoc_leg : string : position de la legende\ndef graph(intGraph,dicData,dicMesure,strLab_y,strLab_leg,strLoc_leg) :\n pyplot.subplot(1, 3, intGraph) # Référence du graph pour la suite\n [pyplot.plot(dicData[0][1:],dicData[i][1:],label=dicData[i][0]+ \" \" + strLab_leg ) for i in 
range(1,len(dicData))]\n pyplot.legend( loc = strLoc_leg)\n pyplot.scatter(dicMesure[0][1:],dicMesure[intGraph][1:])\n pyplot.xlabel('Age en mois') \n pyplot.ylabel(strLab_y)\n pyplot.grid(True)\n\n###############################################################################\"\n# Début\n# \n\n# chargement des données light\n# Dictionnaire des normes\n# Niveau 1 :\n# Clé : Type (poids,taille)\n# Data : \n# Liste :\n# 1 : Numéro du graph\n# 2 : Dictionnaire\n# Clé : Genre \n# Data : Contenu du fichier\n# 3 : Libelle Y\n# 4 : libelle legende\n# 5 : position legende \ndicNorme ={'W':[1,{'g':load_file('poids-age-garcon-0-60-light.csv'),'f':load_file('poids-age-fille-0-60-light.csv')},'Poids en kg','poids','upper left'],'T':[2,{'g': load_file('taille-age-garcon-0-60-light.csv'),'f':load_file('taille-age-fille-0-60-light.csv')},'Taille en cm','taille','upper left'],'S':[3,{'g': load_file('perim-cra-age-garcon-0-60-light.csv'),'f':load_file('perim-cra-age-fille-0-60-light.csv') },'Périmètre en cm','périmètre','lower right']} \n\n# chargement des mesures\ndicMesure = load_file('mesures.csv')\n\n# Genre : Saisie / Vérification g ou f \nstrGenre = ''\nwhile True:\n strGenre = str(input (\"Entrez le genre de votre nourrisson ('g' pour garçon, 'f' pour fille), ctr-d pour quitter : \"))\n strGenre = strGenre.lower() # minuscule\n if strGenre == 'g' or strGenre == 'f' : \n break\n\n# Affichage du graph\n[graph(dicNorme[Mykey][0],dicNorme[Mykey][1][strGenre],dicMesure,dicNorme[Mykey][2],dicNorme[Mykey][3],dicNorme[Mykey][4]) for Mykey in dicNorme.keys()]\npyplot.show()","sub_path":"nourrisson-step2.py","file_name":"nourrisson-step2.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"449857962","text":"from gtts import gTTS\nimport os\n\nprint(\"\\nINPUT THE NUMBER OF TEXT FILES THAT YOU WANT TO CONVERT INTO AUDIO: \",end=\"\")\n\nn=int(input())\n\nfor i in range(0,n):\n\n print(\"\\nGIVE THE TEXT FILE NAME(.txt format): \",end=\"\")\n txt=input() \n\n f=open(txt)\n x=f.read()\n\n language='en'\n\n audio=gTTS(text=x,lang=language)\n\n print(\"\\nGIVE THE AUDIO FILE NAME IN WHICH OUTPUT WILL BE SAVED(.wav or .mp3 format): \",end=\"\")\n aud=input()\n\n audio.save(aud)\n os.system(aud)\n","sub_path":"Text to Speech/text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"621226883","text":"#!/usr/bin/python\n\n# Import necessary modules\nimport os\nimport csv\nimport datetime\n\n# Gather/manipulate date information for csv naming/decisions\ntimestamp = datetime.datetime.now()\ntimestamp_string = str(timestamp) # Make string to use split()\ndate, t = timestamp_string.split(' ') # Separate into date and time\nyear, month, day = date.split('-') # Split up elements of date\nmon_yr = month + '-' + year # Recombine month/year\n\n\ndef main():\n logfile = '/home/pi/Documents/Speedtest_Logger/Speedtest_Logfiles/'\n +mon_yr+'.csv'\n outfile = open(logfile, 'a', newline='') # Open file\n writer = csv.writer(outfile) # Create new\n print('Retrieving speed test data...')\n ping, download, upload = get_speedtest_data() # Store data\n writer.writerow([date, t, ping, download, upload]) # Write data\n outfile.close()\n\n\ndef get_speedtest_data():\n '''New function to pipe in data from the speedtest terminal function'''\n speedtest_output = os.popen('speedtest-cli --simple') # Call terminal com\n\n # Set variables to have no data\n ping = download = upload = 0\n\n for line in speedtest_output: # Loop through lines in speedtest\n label, value, unit = line.split(' ') # Split line into three\n\n # Store values in correct variable based on the label in outpu\n if 'Ping' in label:\n ping = float(value)\n elif 'Download' in label:\n download = float(value)\n # Return all values IF all values were parsed\n if all((ping, download, upload)):\n print('Data logged successfully!')\n print('Ping: ' + str(ping) + ' ms')\n print('Download: ' + str(download) + ' Mbps')\n print('Upload: ' + str(upload) + ' Mbps')\n return ping, download, upload\n else:\n print('Values not logged.')\n\n# Runs main() function. 
Used to make python programs similar to C/C++.\nif __name__ == '__main__':\n main()\n","sub_path":"speedtest_logger.py","file_name":"speedtest_logger.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"44608566","text":"\"\"\" Compiled: 2020-09-18 10:38:50 \"\"\"\n\n#__src_file__ = \"extensions/bdp_benchmark_test/./etc/FBDPBenchmarkTest_Perform.py\"\n\"\"\"----------------------------------------------------------------------------\nMODULE\n FBDPBenchmarkTest_Perform - Module which performs BDPBenchmarkTest.\n\n Requirements:\n \n BDP benchmark test creates instruments, prices, counterparties,\n acquirers, and trades measure the time taken. It also can clone\n the existing trades and measure performance. All the test data\n can be cleaned after the tests complete.\n\nDESCRIPTION\n This module performs the BDP benchmark test based on the\n parameters passed from the script FBDPBenchmarkTest.\n\n----------------------------------------------------------------------------\"\"\"\n\nimport sys\nimport random\nimport time\nfrom contextlib import contextmanager\nimport acm\nimport ael\nimport FBDPCommon\nfrom FBDPCurrentContext import Summary\nfrom FBDPCurrentContext import Logme\n\nINSTRUMENT_NAME_PREFIX = 'BDPBenchmark_INS_'\nSTOCK_NAME_PREFIX = 'BDPBenchmark_INS_STOCK_'\nPORTFOLIO_NAME_PREFIX = 'BDPBenchmarkPort_'\nCOUNTERPARTY_NAME_PREFIX = 'BDPBenchmarkCP_'\nACQUIRER_NAME_PREFIX = 'BDPBenchmarkACQ_'\nNUMBER_OF_PORTFOLIOS = 4\nNUMBER_OF_COUNTERPARTIES = 10\nNUMBER_OF_ACQUIRERS = 10\nPRICEMARKET = 'SPOT'\n\n\ndef perform_test(execParam):\n e = BenchmarkTest(execParam)\n e.perform()\n Summary().log(execParam)\n Logme()(None, 'FINISH')\n\n\ndef getChangeFactor(minFact=0.8, maxFact=1.25):\n return round(float(random.uniform(minFact, maxFact)), 2)\n\n\ndef createCounterParty(name, *argv):\n cpty = acm.FCounterParty()\n cpty.Name(name)\n cpty.Commit()\n Summary().ok(cpty, Summary().CREATE, cpty.Oid())\n return cpty\n\n\ndef createPhysicalPortfolio(name, *argv):\n prf = acm.FPhysicalPortfolio()\n prf.Name(name)\n prf.AssignInfo(name)\n prf.Currency(acm.FCurrency['EUR'])\n prf.Commit()\n Summary().ok(prf, Summary().CREATE, prf.Oid())\n return prf\n\n\ndef 
createInternalDepartment(name, *argv):\n acq = acm.FInternalDepartment()\n acq.Name(name)\n acq.Commit()\n Summary().ok(acq, Summary().CREATE, acq.Oid())\n return acq\n\n\ndef getEntities(namePrefix, entityName):\n return acm.GetClass('F' + entityName).Select(\n 'name like %s' % (namePrefix + '*'))\n\n\ndef getEntityNames(namePrefix, entityName):\n return [e.Name() for e in\n getEntities(namePrefix, entityName)]\n \n\ndef getEntityStartIndex(namePrefix, entityName):\n return getEntities(namePrefix, entityName).Size()\n\n\ndef createEntities(namePrefix, entityName, numberOfEntities, *argv):\n eList = []\n startIndex = getEntityStartIndex(namePrefix, entityName)\n for i in range(numberOfEntities):\n uName = namePrefix + str(i + 1 + startIndex)\n cpty = getattr(sys.modules[__name__],\n \"create%s\" % entityName)(uName, argv)\n eList.append(cpty)\n return eList\n\n\ndef DeleteEntities(namePrefix, entityName):\n for name in getEntityNames(namePrefix, entityName):\n Logme()(name, \"DEBUG\")\n Summary().ok(acm.GetClass('F' + entityName)[name], Summary().DELETE, name)\n acm.GetClass('F' + entityName)[name].Delete()\n\n\ndef getRandomDate(lastDate, firstDate):\n days = acm.Time.DateDifference(lastDate, firstDate)\n if days < 0:\n raise RuntimeError('lastDate has to larger than firstDate')\n offset = random.randint(0, days)\n return acm.Time.DateAddDelta(firstDate, 0, 0, offset)\n\n\n@contextmanager \ndef measureTime(title):\n t1 = time.clock()\n yield\n t2 = time.clock()\n Logme()('%s: %0.2f seconds elapsed' % (title, t2 - t1))\n\nclass BenchmarkTest(object):\n \n def createInstruments(self):\n instruments = []\n self.startIndex = getEntityStartIndex(STOCK_NAME_PREFIX, 'Instrument')\n\n for i in range(self.numberOfInstruments):\n insName = STOCK_NAME_PREFIX\\\n + str(i + 1 + self.startIndex)\n ins = Stock(insName)\n instruments.append(ins)\n return instruments\n\n def readArguments(self, execParam):\n self.lastTradeDate = FBDPCommon.toDate(\n execParam.get('lastTradeDate', 
'Today'))\n self.firstTradeDate = FBDPCommon.toDate(\n execParam.get('firstTradeDate', '-6m'))\n\n self.numberOfInstruments = execParam.get('numberOfInstruments', 1)\n self.numberOfTradesPerIns = execParam.get('numberOfTradesPerIns', 1000) \n self.clone = execParam.get('simulate', 0)\n self.createNew = execParam.get('createNew', 0)\n self.cleanUp = execParam.get('cleanUp', 0)\n self.simulateIns = execParam.get('Instruments', None)\n self.prfs = execParam.get('TradingPortfolios', None)\n self.numberOfClone = execParam.get('numberOfCloneTrades', 10)\n \n \n def __init__(self, execParam):\n self.readArguments(execParam)\n\n def doCleanUp(self):\n\n for p in getEntities(PORTFOLIO_NAME_PREFIX, 'PhysicalPortfolio'):\n tOids = [t.Oid() for t in p.Trades()]\n for oid in tOids:\n Logme()(oid, \"DEBUG\")\n Summary().ok(acm.FTrade[oid], Summary().DELETE, oid)\n acm.FTrade[oid].Delete()\n\n DeleteEntities(PORTFOLIO_NAME_PREFIX, 'PhysicalPortfolio')\n DeleteEntities(COUNTERPARTY_NAME_PREFIX, 'CounterParty')\n DeleteEntities(ACQUIRER_NAME_PREFIX, 'InternalDepartment')\n\n names = getEntityNames(INSTRUMENT_NAME_PREFIX, 'Instrument')\n for n in names:\n Logme()(n, \"DEBUG\")\n query = ('instrument={0} and currency={1}'.format(\n acm.FInstrument[n].Oid(), acm.FInstrument[n].Currency().Oid()))\n prices = acm.FPrice.Select(query)\n pOids = [p.Oid() for p in prices]\n for p in pOids:\n Logme()(p, \"DEBUG\")\n Summary().ok(acm.FPrice[p], Summary().DELETE, p)\n acm.FPrice[p].Delete()\n Summary().ok(acm.FInstrument[n], Summary().DELETE, n)\n acm.FInstrument[n].Delete()\n \n\n def perform(self):\n if self.cleanUp:\n Logme()('Clean up test data.....', \"DEBUG\")\n self.doCleanUp()\n elif self.createNew:\n Logme()('Create new data.....', \"DEBUG\")\n with measureTime('Portfolios creation time'):\n self.portfolioNames = createEntities(\n PORTFOLIO_NAME_PREFIX, 'PhysicalPortfolio', NUMBER_OF_PORTFOLIOS)\n with measureTime('Counterparties creation time'):\n self.counterparties = 
createEntities(\n COUNTERPARTY_NAME_PREFIX, 'CounterParty', NUMBER_OF_COUNTERPARTIES)\n with measureTime('Acquirers creation time'):\n self.acquirers = createEntities(\n ACQUIRER_NAME_PREFIX, 'InternalDepartment', NUMBER_OF_ACQUIRERS)\n \n with measureTime('Instruments creation time'):\n self.instruments = self.createInstruments() \n\n with measureTime('Prices creation time'):\n for ins in self.instruments:\n ins.createPrice(self.lastTradeDate)\n with measureTime('Trades creation time'):\n for ins in self.instruments:\n ins.createTrades(self.numberOfTradesPerIns,\n self.lastTradeDate, self.firstTradeDate,\n self.portfolioNames, self.counterparties,\n self.acquirers)\n elif self.clone:\n Logme()('Cloning existing data.....', \"DEBUG\")\n \n with measureTime('Instruments clone time'):\n for ins in self.simulateIns:\n insClone = ins.Clone()\n insClone.Name(INSTRUMENT_NAME_PREFIX + ins.Name())\n insClone.Commit()\n with measureTime('Trades clone time'):\n for ins in self.simulateIns:\n count = 0\n for t in ins.Trades():\n tc = t.Clone()\n tc.Instrument(acm.FInstrument[\n INSTRUMENT_NAME_PREFIX + ins.Name()])\n tc.Status('Simulated')\n tc.Commit()\n tc.ConnectedTrade(tc)\n tc.Commit()\n count = count + 1\n if count >= self.numberOfClone:\n break\n\n\nclass InstrumentBase(object):\n def __init__(self, ins_name):\n self.ins.Name(ins_name)\n \n def createPrice(self, price_date, price, price_market=PRICEMARKET):\n ael_date = ael.date(price_date)\n \n ael_market = ael.Party[price_market]\n ael_ins = ael.Instrument[self.ins.Name()]\n try:\n newPrice = ael.Price.new()\n newPrice.insaddr = ael_ins\n newPrice.curr = ael_ins.curr\n newPrice.day = ael_date\n newPrice.ptynbr = ael_market\n newPrice.bid = price\n newPrice.ask = price\n newPrice.last = price\n newPrice.settle = price\n newPrice.commit()\n Summary().ok(newPrice, Summary().CREATE, newPrice.prinbr)\n return price\n\n except Exception as msg:\n if (ael_date == ael.date_today()) and (ael_market.type == 'Market'):\n for p 
in ael_ins.prices():\n if (ael_market == p.ptynbr):\n thePrice = p\n break\n else:\n for p in ael_ins.historical_prices():\n if (ael_date == p.day) and (ael_market == p.ptynbr):\n thePrice = p\n break\n updPrice = thePrice.clone()\n updPrice.day = ael_date\n updPrice.bid = price\n updPrice.ask = price\n updPrice.last = price\n updPrice.settle = price\n updPrice.commit()\n return price\n\n\nclass Stock(InstrumentBase):\n def __init__(self, ins_name):\n self.ins = acm.FStock()\n super(Stock, self).__init__(ins_name)\n self.ins.Commit()\n Summary().ok(self.ins, Summary().CREATE, self.ins.Oid())\n\n def createPrice(self, price_date, price=None):\n if not price:\n price = 100 * getChangeFactor()\n else:\n price = float(price)\n return super(Stock, self).createPrice(price_date, price)\n \n def createTrades(self, numberOfTrades, lastDate, firstDate,\n portfolioList, cpList, acList):\n\n numberOfPortfolios = len(portfolioList)\n numberOfCP = len(cpList)\n numberOfAcq = len(acList)\n for i in range(numberOfTrades):\n acm.BeginTransaction()\n trd=acm.FTrade()\n trd.Currency(self.ins.Currency())\n trd_time = getRandomDate(lastDate, firstDate)\n trd.TradeTime(trd_time)\n val_day = FBDPCommon.businessDaySpot(self.ins, trd_time)\n trd.ValueDay(val_day)\n trd.AcquireDay(val_day)\n trd.Quantity(100)\n price = 100 * getChangeFactor()\n trd.Price(price)\n trd.Portfolio(portfolioList[random.randint(0, numberOfPortfolios - 1)])\n trd.Counterparty(cpList[random.randint(0, numberOfCP - 1)])\n trd.Acquirer(acList[random.randint(0, numberOfAcq - 1)])\n trd.Instrument(self.ins)\n trd.UpdatePremium(True)\n trd.Status('Simulated')\n trd.Commit()\n Summary().ok(trd, Summary().CREATE, trd.Oid())\n acm.CommitTransaction()\n\n","sub_path":"Extensions/BDP Benchmark Test/FPythonCode/FBDPBenchmarkTest_Perform.py","file_name":"FBDPBenchmarkTest_Perform.py","file_ext":"py","file_size_in_byte":11696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"189905593","text":"import numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport mpl_toolkits.mplot3d.axes3d as p3\r\nfrom matplotlib import animation\r\nimport copy\r\nimport math\r\n\r\nclass integration_and_plot:\r\n \"\"\"\r\n This class will contains two functions. First integrates linear accelation data. \r\n Second function plots the integrated data.\r\n Input is the list of linear acceleration data with timestamps from\r\n data_processor_pandas.py .\r\n \"\"\"\r\n \r\n def __init__(self, AccandTime):\r\n \"\"\"\r\n \"\"\"\r\n self.inputData = AccandTime\r\n \r\n def integration(self): \r\n accelerationVector = []\r\n timesOfAcc = []\r\n velocityVector = []\r\n positionVector = []\r\n accelerationMean = np.array([-0.00089, 0.00118, 0.05586])\r\n #The mean of the static acceleration is subtracted from the acceleration arrays, and all is rounded to 5dp\r\n for i in range(len(self.inputData)):\r\n accelerationVector.append( np.around( np.array(self.inputData[i][1] - accelerationMean), decimals = 5) )\r\n timesOfAcc.append(self.inputData[i][0])\r\n \r\n # low pass filter to remove noise from the LiDAR sensor\r\n\r\n for i in range(len(self.inputData)):\r\n if abs(accelerationVector[i][0]) < 1.4:\r\n accelerationVector[i][0] = 0\r\n if abs(accelerationVector[i][1]) < 1.4:\r\n accelerationVector[i][1] = 0\r\n else:\r\n pass\r\n\r\n\r\n #integration of acceleration to velocity\r\n tempVelocity = np.zeros(3)\r\n for i in range(len(accelerationVector) - 1):\r\n tempVelocity = tempVelocity + (accelerationVector[i+1] + accelerationVector[i])*(timesOfAcc[i+1] - timesOfAcc[i])*0.5 \r\n velocityVector.append(tempVelocity)\r\n\r\n #need to pop the first timestamp so that lists match,\r\n #as we are using trapezium rule to integrate and with a short enough period between values\r\n #this has a negigible affect on precision\r\n timesOfVel = copy.deepcopy(timesOfAcc)\r\n timesOfVel.pop(0)\r\n\r\n #integration of velocity to position\r\n tempPosition = 
np.zeros(3)\r\n for i in range(len(velocityVector) - 1):\r\n tempPosition = tempPosition + (velocityVector[i+1] + velocityVector[i])*(timesOfVel[i+1] - timesOfVel[i])*0.5\r\n positionVector.append(tempPosition)\r\n\r\n timesOfPos = copy.deepcopy(timesOfVel)\r\n timesOfPos.pop(0)\r\n\r\n #provide the object new parameters so that the plotting method can use them\r\n self.timesOfPos = timesOfPos\r\n self.timesOfVel = timesOfVel\r\n self.timesOfAcc = timesOfAcc\r\n self.accelerationVector = accelerationVector\r\n self.velocityVector = velocityVector\r\n self.positionVector = positionVector\r\n\r\n def plotting(self, fileNameForSaving):\r\n # source for increasing the font size, default font size is 10: https://stackoverflow.com/a/3900167\r\n plt.rcParams.update({'font.size': 11})\r\n\r\n currentAccX = []\r\n currentAccY = []\r\n currentAccZ = []\r\n \r\n for i in range(len(self.accelerationVector)):\r\n currentAccX.append(self.accelerationVector[i][0])\r\n currentAccY.append(self.accelerationVector[i][1])\r\n currentAccZ.append(self.accelerationVector[i][2])\r\n\r\n #This line of code, if activated for a stationary data file, this will print the mean acceleration of the file. 
\r\n #This can be used to find the bias acceleration for the inertial measurement unit.\r\n #print('The mean of the acceleration data is (in order of X,Y,Z):\\n%f\\n%f\\n%f'%(np.mean(currentAccX), np.mean(currentAccY), np.mean(currentAccZ) ))\r\n \r\n plt.plot(self.timesOfAcc, currentAccZ, '-', label = 'Z Acceleration')\r\n plt.plot(self.timesOfAcc, currentAccY, '-', label = 'Y Acceleration')\r\n plt.plot(self.timesOfAcc, currentAccX, '-', label = 'X Acceleration')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Linear Acceleration (ms$^{-2}$)')\r\n plt.minorticks_on()\r\n # plt.title('Acceleration over time of NGIMU')\r\n plt.legend(loc = 2)\r\n plt.savefig(\"%s_acceleration.jpg\"%(fileNameForSaving), bbox_inches='tight')\r\n plt.show()\r\n \r\n currentVelX = []\r\n currentVelY = []\r\n currentVelZ = []\r\n \r\n for i in range(len(self.velocityVector)):\r\n currentVelX.append(self.velocityVector[i][0])\r\n currentVelY.append(self.velocityVector[i][1])\r\n currentVelZ.append(self.velocityVector[i][2])\r\n \r\n plt.plot(self.timesOfVel, currentVelX, '-', label = 'X Velocity')\r\n plt.plot(self.timesOfVel, currentVelY, '-', label = 'Y Velocity')\r\n plt.plot(self.timesOfVel, currentVelZ, '-', label = 'Z Velocity')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Velocity (ms$^{-1}$)')\r\n # plt.title('Velocity over time of NGIMU')\r\n plt.legend(loc = 2)\r\n plt.savefig(\"%s_velocity.jpg\"%(fileNameForSaving), bbox_inches='tight')\r\n plt.show()\r\n \r\n currentPosX = []\r\n currentPosY = []\r\n currentPosZ = []\r\n \r\n for i in range(len(self.positionVector)):\r\n currentPosX.append(self.positionVector[i][0])\r\n currentPosY.append(-1 * self.positionVector[i][1]) # there is a minus one here because it seems the NGIMU was backwards the whole time\r\n currentPosZ.append(self.positionVector[i][2])\r\n \r\n plt.plot(self.timesOfPos, currentPosX, '-', label = 'X Position')\r\n plt.plot(self.timesOfPos, currentPosY, '-', label = 'Y Position')\r\n plt.plot(self.timesOfPos, 
currentPosZ, '-', label = 'Z Position')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Position (m)')\r\n #plt.title('Position over time of NGIMU')\r\n plt.legend(loc = 2)\r\n plt.savefig(\"%s_position.jpg\"%(fileNameForSaving), bbox_inches='tight')\r\n plt.show()\r\n \r\n # the following section of code will generate a 2d plot of position in x vs position in y.\r\n # It also contains a dummy plot, which will instead return the final magnitude of position \r\n # of the sensor (since we won't be showing corner test diagrams). If we want to add corner\r\n # test diagrams, these won't really work, maybe instead calculate the difference in displacement?\r\n plt.plot(currentPosX, currentPosY, '-')\r\n # dummy plot\r\n plt.plot([], [], ' ', label=\"Vehicle moved %s m overall\"%(np.around(\r\n np.sqrt(currentPosX[-1] * currentPosX[-1] + currentPosY[-1] * currentPosY[-1])\r\n , decimals=2)))\r\n plt.legend(frameon=False)\r\n plt.xlabel('Position over time in x (m)')\r\n plt.ylabel('Position over time in y (m)')\r\n plt.savefig(\"%s_2d_position.jpg\"%(fileNameForSaving), bbox_inches='tight')\r\n plt.show()\r\n\r\n \"\"\"at this point, we have the new 3D animation code.\r\n This has been cobbled together from various sources, but\r\n mainly from matplotlibanimator.py, after using matplotlibanimator_3.py \r\n to understand the core concepts.\r\n \"\"\"\r\n \"\"\"\r\n fig = plt.figure()\r\n ax = p3.Axes3D(fig)\r\n\r\n \r\n This section of code will be used to try and reduce the animation and playing time. For example, the 30s of real data played out as closer to \r\n 2mins 30s of gif. 
As a result, we will try selecting fewer of the data points, 1 in 10, 1 in 5 for example, and pass this shortened list to\r\n the animation functions, like gen and FuncAnimation.\r\n \r\n #remainder/modulo operator: https://stackoverflow.com/a/5584604\r\n currentPosXShort = [currentPosX[0]]\r\n currentPosYShort = [currentPosY[0]]\r\n currentPosZShort = [currentPosZ[0]]\r\n timesOfPosShort = [self.timesOfPos[0]]\r\n \r\n for i in range(len(self.timesOfPos)):\r\n if i % 5 == 0:\r\n currentPosXShort.append(currentPosX[i])\r\n currentPosYShort.append(currentPosY[i])\r\n currentPosZShort.append(currentPosZ[i])\r\n timesOfPosShort.append(self.timesOfPos[i])\r\n\r\n def gen():\r\n i = 0\r\n while i < len(timesOfPosShort):\r\n yield np.array([currentPosXShort[i], currentPosYShort[i], currentPosZShort[i]])\r\n i += 1\r\n \r\n def update(num, data, line):\r\n line.set_data(data[:2, :num])\r\n line.set_3d_properties(data[2, :num])\r\n \r\n data = np.array(list(gen())).T\r\n line, = ax.plot(data[0, 0:1], data[1, 0:1], data[2, 0:1]) #nope, no error here.\r\n \r\n #Setting the axes properties\r\n ax.set_xlim3d([-2, 2])\r\n ax.set_xlabel('X Position (m)')\r\n \r\n ax.set_ylim3d([-2, 2])\r\n ax.set_ylabel('Y Position (m)')\r\n \r\n ax.set_zlim3d([-2, 2])\r\n ax.set_zlabel('Z Position (m)')\r\n \r\n ani = animation.FuncAnimation(fig, update, len(timesOfPosShort), fargs=(data, line), interval=1, blit=False)\r\n #ani.save('matplot003.mp4', writer='ffmpeg')\r\n plt.show()\r\n \r\n #source: https://stackoverflow.com/a/38121759\r\n\r\n #23/11 more animation guidance: https://stackoverflow.com/a/28077104\r\n #change of animation writer to ffmpeg, can only be done on machines that can install software: https://stackoverflow.com/a/31193532\r\n\r\n #and in the comments of that:\r\n \r\n If saving as video instead of .gif then ani.save('test.mp4', writer='ffmpeg', codec='h264') should replace the last line. 
\r\n If you want to find out which codecs are available then run ffmpeg -codec in the terminal. \r\n Given that you want to use ffmpeg as the writer. \r\n \r\n \"\"\"\r\n \r\n # This code will produce a 2d animation of the position x vs y. \r\n fig = plt.figure()\r\n ax = plt.axes(xlim=(-0.01, 0.3), ylim=(-0.01, 0.15), xlabel= ('x Position over time (m)'), ylabel=('y Position over time (m)'))\r\n\r\n def gen():\r\n i = 0\r\n while i < len(currentPosX):\r\n yield np.array([currentPosX[i], currentPosY[i]])\r\n i += 1\r\n \r\n def update(num, data, line):\r\n line.set_data(data[:2, :num])\r\n \r\n data = np.array(list(gen())).T\r\n line, = ax.plot(data[0, 0:1], data[1, 0:1]) #nope, no error here.\r\n \r\n ani = animation.FuncAnimation(fig, update, len(currentPosX), fargs=(data, line), interval=1, blit=False)\r\n # ani.save('matplot003.gif')\r\n plt.show()\r\n\r\n\r\n\r\n\r\n","sub_path":"NGIMU processing/NGIMU_integration_and_plotting.py","file_name":"NGIMU_integration_and_plotting.py","file_ext":"py","file_size_in_byte":10680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"125097717","text":"class User:\n active_users = 0\n\n def __init__(self, first, last, age):\n self.first = first\n self.last = last\n self.age = age\n User.active_users += 1\n\n @classmethod\n def display_active_users(cls):\n print(f\"There are currently {cls.active_users} active users\")\n\n @classmethod\n def from_string(cls, data_str):\n fst, lst, age = data_str.split()\n return cls(fst, lst, int(age))\n\n def logout(self):\n User.active_users -= 1\n\n def __repr__(self):\n return f\"User {self.first} {self.last} aged {self.age}\"\n\nUser.display_active_users()\njane = User(\"Jane\", \"Doe\", 18)\npaul = User.from_string(\"Paul Rep 19\")\n\nprint(jane)\nprint(paul)\npaul.logout()\nUser.display_active_users()\n\n","sub_path":"py_z_h/oop/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"539854183","text":"import numpy as np \n\ntrain_seq = np.genfromtxt('../data/digits4000_txt/digits4000_trainset.txt').astype(np.uint16) # (2000,2)\ntest_seq = np.genfromtxt('../data/digits4000_txt/digits4000_testset.txt').astype(np.uint16) # (2000,2)\n\n# image and label\ndigits_vec = np.genfromtxt('../data/digits4000_txt/digits4000_digits_vec.txt') # (4000,28,28)\ndigits_vec = digits_vec.reshape(len(digits_vec), 28, 28).astype(np.uint8)\ndigits_labels = np.genfromtxt('../data/digits4000_txt/digits4000_digits_labels.txt').astype(np.uint8) # (4000,)\n\nx_train = digits_vec[train_seq[:,0] - 1]\ny_train = digits_labels[train_seq[:,1] - 1]\n\nx_test = digits_vec[test_seq[:,0] - 1]\ny_test = digits_labels[test_seq[:,1] - 1]\n\n# challenge test image and label\nx_test1 = np.genfromtxt('../data/challenge/cdigits_digits_vec.txt')\nx_test1 = x_test1.reshape(len(x_test1), 28, 28).astype(np.uint8)\ny_test1 = np.genfromtxt('../data/challenge/cdigits_digits_labels.txt').astype(np.uint8)","sub_path":"1-Machine-Learning/Tools/Neural-Network/Image-Classification/Mnist/tensorflow/use_other_data.py","file_name":"use_other_data.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"413896371","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass dataClassCNN:\n def __init__(self, datapath):\n X_train, y_train = load_mnist(datapath, kind='train')\n X_test, y_test = load_mnist(datapath, kind='t10k')\n\n self.numbOfTrainSamples = X_train.shape[0]\n self.numbOfTestSamples = X_test.shape[0]\n\n #reshape to 28x28\n X_train = np.resize(X_train, (self.numbOfTrainSamples, 28, 28))\n X_test = np.resize(X_test, (self.numbOfTestSamples, 28, 28))\n\n #add depth channel\n X_train = X_train[:,:,:,np.newaxis]\n X_test = X_test[:, :, :, np.newaxis]\n\n #cast to float32\n X_train = X_train.astype(dtype=np.float32)\n X_test = X_test.astype(dtype=np.float32)\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_test -= mean_image\n\n # plt.figure()\n # plt.imshow(X_train[1,:,:,0])\n # plt.colormaps('gray')\n\n self.X_train = X_train\n self.X_test = X_test\n self.y_train = y_train\n self.y_test = y_test\n self.numbOfClasses = 10\n self.numbOfFeatures = [X_train.shape[1], X_train.shape[2], X_train.shape[3]]\n self.label_strings = ['T-shirt / top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n self.testCounter = 0\n self.test_batch_size = 500\n return\n\n def next_training_batch(self, batch_size):\n ind = np.random.randint(self.numbOfTrainSamples, size=batch_size)\n y_onehot = np.zeros((batch_size, self.numbOfClasses))\n y_onehot[np.arange(batch_size), self.y_train[ind]] = 1\n return self.X_train[ind, :,:,:], y_onehot\n\n def get_test_data(self):\n ind = np.linspace(self.testCounter*self.test_batch_size, (self.testCounter+1)*self.test_batch_size-1, num=self.test_batch_size, dtype=np.int32)\n y_onehot = np.zeros((self.test_batch_size, self.numbOfClasses))\n y_onehot[np.arange(self.test_batch_size), self.y_test[ind]] = 1\n self.testCounter = self.testCounter + 1\n if self.testCounter*self.test_batch_size >= 
self.numbOfTestSamples:\n self.testCounter = 0\n return self.X_test[ind, :,:,:], y_onehot\n\n\ndef load_mnist(path, kind='train'):\n import os\n import gzip\n import numpy as np\n\n \"\"\"Load MNIST data from `path`\"\"\"\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels\n\n","sub_path":"INF5860/convNet_solution/utils/dataClass.py","file_name":"dataClass.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"303768744","text":"import tkinter as tk\nfrom Panels.EditorPanel import *\nfrom Panels.Components.EditTestPanel import *\nfrom Panels.Components.EditGroupPanel import *\nfrom Panels.Helper.constants import *\nfrom Panels.Helper.CustomWidgets import *\n\n# Main UI to hold the EditGroupPanel and EditTestPanel\n# Primary interface for passing specific test groups and tests\n# between the TestPanel and the Edit*Panels\nclass EditorPanel(Panel):\n def __init__(self, root, app):\n Panel.__init__(self, root, app, 'Editor Panel', EDITOR_WIDTH, EDITOR_HEIGHT)\n self._edit_group_panel = EditGroupPanel(self)\n self._edit_test_panel = EditTestPanel(self)\n\n # Execute the given test\n def execute_code(self, test):\n if self._edit_test_panel.update() and \\\n self.app.global_panel.validate_variables():\n self.app.terminal.execute_code(test)\n\n # Toggle/change the group panel\n def toggle_group_panel(self, group):\n if group is self._edit_group_panel.group:\n success = self._edit_group_panel.close()\n if success:\n self.toggle_test_panel(None, all_tests=True)\n else:\n success = self._edit_group_panel.set(group)\n\n if success: self._edit_test_panel.close()\n\n # Toggle/change the test panel\n def toggle_test_panel(self, test, all_tests=False):\n if test is self._edit_test_panel.test or all_tests:\n self._edit_test_panel.close()\n else:\n self._edit_test_panel.set(test)\n\n # Save all values currently stored in the UI\n # Returns whether or not the save was successful\n # need to do this separately to not toggle multiple errors\n def save_all(self):\n if not self._edit_test_panel.update():\n return False\n return self._edit_group_panel.update()\n\n # Close the panel\n def close_panel(self):\n if not self._edit_test_panel.close():\n return False\n return 
self._edit_group_panel.close()","sub_path":"Panels/EditorPanel.py","file_name":"EditorPanel.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"653092183","text":"from django.urls import path\r\nfrom django.contrib.auth import views as auth_views\r\nfrom . import views\r\n\r\napp_name = 'Schedule'\r\n\r\nurlpatterns = [\r\n # /Schedule/\r\n path('', views.index_view.as_view(), name='index'),\r\n \r\n # /Schedule/register\r\n path('register/', views.user_form_view.as_view(), name='register'),\r\n \r\n # /Schedule/71 (SPACES ARE IMPORTANT LOL)\r\n path('/', views.detail_view.as_view(), name='detail'),\r\n \r\n path('apts//', views.detail_view_apt.as_view(), name='detail_apt'),\r\n \r\n \r\n #/Schedule/apt/add/\r\n path('apt/add//', views.apt_create.as_view(),name=\"apt-add\"),\r\n \r\n #/Schedule/apt/#\r\n path('apt/update//', views.apt_update.as_view(), name=\"apt-update\"),\r\n \r\n #/Schedule/apt/#\r\n path('apt//delete/', views.apt_delete.as_view(), name=\"apt-delete\"),\r\n \r\n # /Schedule/login\r\n path('logout/', views.user_logout, name='logout'),\r\n path('login/', auth_views.login, {'template_name': 'Schedule/login.html'}, name='login'),\r\n \r\n #/Schedule/class_list/\r\n path('class_list//', views.user_class_list_detail.as_view(), name='class_list'),\r\n \r\n #===========================================================================\r\n # REST API \r\n #===========================================================================\r\n path('apts/all/', views.ApppointmentList.as_view(), name=\"jsonapt\"),\r\n path('users/all/', views.UserList.as_view(), name=\"jsonuser\"),\r\n path('classes/all/', views.ClassesList.as_view(), name=\"jsonclass\"),\r\n path('class_dates/all/', views.ClassDatesList.as_view(), name=\"jsondate\"),\r\n]","sub_path":"Schedule/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"360150334","text":"import time\nimport Adafruit_CharLCD as LCD\nfrom requests import get\nfrom datetime import datetime\nimport socket\n\n# Checking IP addresses\n\ntestIP = \"8.8.8.8\"\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.connect((testIP, 0))\nipaddr = s.getsockname()[0]\neipaddr = get('https://api.ipify.org').text\nmessage = ipaddr + \"\\n\" + eipaddr\n\n# Raspberry Pi pin setup\nlcd_rs = 25\nlcd_en = 24\nlcd_d4 = 23\nlcd_d5 = 17\nlcd_d6 = 18\nlcd_d7 = 22\nlcd_backlight = 2\n\n# Define LCD column and row size for 16x2 LCD.\nlcd_columns = 16\nlcd_rows = 2\n\nlcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight)\nlcd.clear()\nlcd.message(message)\n# Wait 5 seconds\n\n","sub_path":"E-hearing/bootIP/bootIP.py","file_name":"bootIP.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"470192295","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Basic example for a bot that awaits an answer from the user\n# This program is dedicated to the public domain under the CC0 license.\n\nimport logging\nfrom telegram import Emoji, ForceReply, ReplyKeyboardMarkup\nfrom telegram.ext import Updater\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - '\n '%(message)s',\n level=logging.INFO)\n\n# Define the different states a chat can be in\nMENU, AWAIT_CONFIRMATION, AWAIT_INPUT = range(3)\n\n# Python 2 and 3 unicode differences\ntry:\n YES, NO = (Emoji.WHITE_HEAVY_CHECK_MARK.decode('utf-8'),\n Emoji.CROSS_MARK.decode('utf-8'))\nexcept AttributeError:\n YES, NO = (Emoji.THUMBS_UP_SIGN, Emoji.THUMBS_DOWN_SIGN)\n\n# States are saved in a dict that maps chat_id -> state\nstate = dict()\n# Sometimes you need to save data temporarily\ncontext = dict()\n# This dict is used to store the settings value for the chat.\n# Usually, you'd use persistence for this (e.g. sqlite).\nvalues = dict()\n\n\n# Example handler. 
Will be called on the /set command and on regular messages\ndef set_value(bot, update):\n chat_id = update.message.chat_id\n user_id = update.message.from_user.id\n text = update.message.text\n chat_state = state.get(chat_id, MENU)\n chat_context = context.get(chat_id, None)\n\n # Since the handler will also be called on messages, we need to check if\n # the message is actually a command\n if chat_state == MENU and text[0] == '/':\n state[chat_id] = AWAIT_INPUT # set the state\n context[chat_id] = user_id # save the user id to context\n bot.sendMessage(chat_id,\n text=\"Por favor ingresa tu valor o \"\n \"/cancel para cancelar\",\n reply_markup=ForceReply())\n\n # If we are waiting for input and the right user answered\n elif chat_state == AWAIT_INPUT and chat_context == user_id:\n state[chat_id] = AWAIT_CONFIRMATION\n\n # Save the user id and the answer to context\n context[chat_id] = (user_id, update.message.text)\n reply_markup = ReplyKeyboardMarkup([[YES, NO]], resize_keyboard=True, one_time_keyboard=True)\n bot.sendMessage(chat_id, text=\"Estas seguro?\",\n reply_markup=reply_markup)\n\n # If we are waiting for confirmation and the right user answered\n elif chat_state == AWAIT_CONFIRMATION and chat_context[0] == user_id:\n state[chat_id] = MENU\n context[chat_id] = None\n if text == YES:\n values[chat_id] = chat_context[1]\n bot.sendMessage(chat_id,\n text=\"Valor cambiado a : %s.\" % values[chat_id])\n else:\n bot.sendMessage(chat_id,\n text=\"Valor no cambiado: %s.\"\n % values.get(chat_id, ''))\n\n\n# Handler for the /cancel command.\n# Sets the state back to MENU and clears the context\ndef cancel(bot, update):\n chat_id = update.message.chat_id\n state[chat_id] = MENU\n context[chat_id] = None\n\ndef payments(bot, update):\n chat_id = update.message.chat_id\n state[chat_id] = MENU\n context[chat_id] = None\n reply_markup = ReplyKeyboardMarkup([[\"Deposito\"], [\"Recarga\"], [\"Servicios\"], [\"Consulta\"]], resize_keyboard=True, one_time_keyboard=True)\n 
bot.sendMessage(chat_id, text=\"Selecciona la transaccion:\",\n reply_markup=reply_markup)\n\n\ndef help(bot, update):\n bot.sendMessage(update.message.chat_id, text=\"Usa /set para configurar\")\n\n\n# Create the Updater and pass it your bot's token.\nupdater = Updater(\"202564241:AAGv1eVqFi9goTaNBQL8ZbymLH5xnnoqcHg\")\n\n# The command\nupdater.dispatcher.addTelegramCommandHandler('set', set_value)\n# The answer and confirmation\nupdater.dispatcher.addTelegramMessageHandler(set_value)\nupdater.dispatcher.addTelegramCommandHandler('cancel', cancel)\nupdater.dispatcher.addTelegramCommandHandler('start', help)\nupdater.dispatcher.addTelegramCommandHandler('help', help)\nupdater.dispatcher.addTelegramCommandHandler('payments', payments)\n\n\n# Start the Bot\nupdater.start_polling()\n\n# Run the bot until the user presses Ctrl-C or the process receives SIGINT,\n# SIGTERM or SIGABRT\nupdater.idle()","sub_path":"state_machine.py","file_name":"state_machine.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"558972560","text":"import setuptools\n\nlong_description = '''\nThis package extends the python-mwlinks toolkit released by mediawiki. \n\nExtracts links from MediaWiki with a focus on Wikipedia.\n\nThis library add multi-process utilities for extracting wikilinks from MediaWiki XML database dumps.\n'''\n\nsetuptools.setup(\n name=\"python-mwlinks\",\n version=\"0.0.1\",\n url=\"https://github.com/hunterhector/python-mwlinks\",\n\n description=\"A python MediaWiki Link parser\",\n long_description=long_description,\n\n packages=setuptools.find_packages(),\n platforms='any',\n\n install_requires=[\n 'mwxml',\n 'mwtypes',\n 'docopt',\n 'jsonable',\n ],\n extras_require={\n },\n package_data={\n },\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"198633884","text":"from pytezos.michelson.pack import pack\nfrom pytezos.repl.control import instruction\nfrom pytezos.repl.context import Context\nfrom pytezos.repl.types import assert_stack_type, Option, Pair, String, Bytes, List, BigMap, Map, Set, Or, Bool, Nat, \\\n Unit, StackItem, dispatch_type_map\nfrom pytezos.michelson.pack import unpack\n\n\n@instruction(['CAR', 'CDR'])\ndef do_car(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, Pair)\n idx = {'CAR': 0, 'CDR': 1}\n res = top.get_element(idx[prim])\n ctx.push(res, annots=annots)\n\n\n@instruction('CONCAT')\ndef do_concat(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, [String, Bytes, List])\n if type(top) in [String, Bytes]:\n second = ctx.pop1()\n val_type = dispatch_type_map(top, second, {\n (String, String): str,\n (Bytes, Bytes): bytes\n })\n res = type(top)(val_type(top) + val_type(second))\n elif type(top) == List:\n res_type = top.val_type()\n val_type, sep = {\n String: (str, ''),\n Bytes: (bytes, b'')\n }[res_type]\n res = res_type(sep.join(map(val_type, top)))\n else:\n assert False\n ctx.push(res, annots=annots)\n\n\n@instruction('CONS')\ndef do_cons(ctx: Context, prim, args, annots):\n val, container = ctx.pop2()\n assert_stack_type(container, List)\n res = container.prepend(val)\n ctx.push(res, annots=annots)\n\n\n@instruction('EMPTY_BIG_MAP', args_len=2)\ndef do_empty_big_map(ctx: Context, prim, args, annots):\n res = ctx.big_maps.empty(k_type_expr=args[0], v_type_expr=args[1])\n ctx.push(res, annots=annots)\n\n\n@instruction('EMPTY_MAP', args_len=2)\ndef do_empty_map(ctx: Context, prim, args, annots):\n res = Map.empty(k_type_expr=args[0], v_type_expr=args[1])\n ctx.push(res, annots=annots)\n\n\n@instruction('EMPTY_SET', args_len=1)\ndef do_empty_set(ctx: Context, prim, args, annots):\n res = Set.empty(k_type_expr=args[0])\n ctx.push(res, annots=annots)\n\n\n@instruction('GET')\ndef do_get(ctx: Context, prim, args, 
annots):\n key, container = ctx.pop2()\n assert_stack_type(container, [Map, BigMap])\n\n if type(container) == Map:\n val = container.find(key)\n else:\n val = ctx.big_maps.find(container, key)\n\n if val is not None:\n res = Option.some(val)\n else:\n res = Option.none(container.val_type_expr())\n\n ctx.push(res, annots=annots)\n\n\n@instruction(['LEFT', 'RIGHT'], args_len=1)\ndef do_left(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n if prim == 'LEFT':\n res = Or.left(r_type_expr=args[0], item=top)\n else:\n res = Or.right(l_type_expr=args[0], item=top)\n ctx.push(res, annots=annots)\n\n\n@instruction('MEM')\ndef do_mem(ctx: Context, prim, args, annots):\n key, container = ctx.pop2()\n assert_stack_type(container, [Set, Map, BigMap])\n if type(container) == BigMap:\n res = Bool(ctx.big_maps.contains(container, key))\n else:\n res = Bool(key in container)\n ctx.push(res, annots=annots)\n\n\n@instruction('NIL', args_len=1)\ndef do_nil(ctx: Context, prim, args, annots):\n nil = List.empty(args[0])\n ctx.push(nil, annots=annots)\n\n\n@instruction('NONE', args_len=1)\ndef do_none(ctx: Context, prim, args, annots):\n none = Option.none(args[0])\n ctx.push(none, annots=annots)\n\n\n@instruction('PACK')\ndef do_pack(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n res = Bytes(pack(top.val_expr, top.type_expr))\n ctx.push(res, annots=annots)\n\n\n@instruction('PAIR')\ndef do_pair(ctx: Context, prim, args, annots):\n left, right = ctx.pop2()\n res = Pair.new(left, right)\n ctx.push(res, annots=annots)\n\n\n@instruction('SIZE')\ndef do_size(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, [String, Bytes, List, Set, Map])\n res = Nat(len(top))\n ctx.push(res, annots=annots)\n\n\n@instruction('SLICE')\ndef do_slice(ctx: Context, prim, args, annots):\n offset, length, s = ctx.pop3()\n assert_stack_type(s, [String, Bytes])\n offset, length = int(offset), int(length)\n if offset + length <= len(s):\n res = 
Option.some(s[offset:offset+length])\n else:\n res = Option.none(type(s)().type_expr)\n ctx.push(res, annots=annots)\n\n\n@instruction('SOME')\ndef do_some(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n res = Option.some(top)\n ctx.push(res, annots=annots)\n\n\n@instruction('UNIT')\ndef do_unit(ctx: Context, prim, args, annots):\n ctx.push(Unit(), annots=annots)\n\n\n@instruction('UNPACK', args_len=1)\ndef do_unpack(ctx: Context, prim, args, annots):\n top = ctx.pop1()\n assert_stack_type(top, Bytes)\n try:\n val_expr = unpack(data=bytes(top), type_expr=args[0])\n item = StackItem.parse(val_expr=val_expr, type_expr=args[0])\n res = Option.some(item)\n except Exception as e:\n ctx.print(f'failed: {e}')\n res = Option.none(args[0])\n ctx.push(res, annots=annots)\n\n\n@instruction('UPDATE')\ndef do_update(ctx: Context, prim, args, annots):\n key, val, container = ctx.pop3()\n assert_stack_type(container, [Set, Map, BigMap])\n\n if type(container) == Set:\n assert_stack_type(val, Bool)\n if val:\n res = container.add(key)\n else:\n res = container.remove(key)\n else:\n assert_stack_type(val, Option)\n if val.is_none():\n res = container.remove(key)\n else:\n res = container.update(key, val.get_some())\n\n ctx.push(res, annots=annots)\n","sub_path":"pytezos/repl/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"523119545","text":"# Cracking the Coding Interview\n# Problem 1.1\n# string has unique characters?\n\n\ndef is_unique(string):\n # use hash table (dictionary) to store seen chars\n if type(string) is not str:\n raise ValueError(\"is_unique argument must be a string\")\n table = {}\n for letter in string:\n if table.get(letter) is not None:\n return False\n table[letter] = True\n return True\n\n\ndef is_unique_alt(string):\n # use ASCII values as an index to a list to check for repeats\n if type(string) is not str:\n raise ValueError(\"is_unique argument must be a string\")\n table = [None] * 256\n for letter in string:\n if table[ord(letter)] is None:\n table[ord(letter)] = True\n else:\n return False\n return True\n\n\nassert is_unique(\"1234\") is True\nassert is_unique(\"helloworld\") is False\n\nassert is_unique_alt(\"1234\") is True\nassert is_unique_alt(\"helloworld\") is False\n\n","sub_path":"CrackingTheCodingInterview/ArraysAndStrings/is_unique.py","file_name":"is_unique.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"275602598","text":"import pickle\r\nimport socket\r\nfrom _thread import *\r\nfrom JobList import *\r\nfrom JobCreator import *\r\nfrom JobSeeker import *\r\nfrom FileRecord import *\r\nimport sys\r\ntry:\r\n from ip2geotools.databases.noncommercial import DbIpCity\r\nexcept ImportError:\r\n print(\"Need to install ip2geotools to continue\")\r\n sys.exit(0)\r\n\r\nclass Server(object):\r\n\r\n #Message Variables:\r\n initialConnectionMessage = [\"LOGIN \", \"POSITION SELECTION: \", \"\",\r\n \"\"]\r\n\r\n jobCreatorCommandMessage = [\"CREATEJOB \",\r\n \"REMOVEJOB \",\r\n \"VIEWJOBS\",\r\n \"STARTJOB \",\r\n \"JOBTYPE SELECTION: \",\r\n \"\", \"\",\r\n \"\", \"\",\r\n \"\", \"\"]\r\n\r\n jobSeekerCommandMessage = [\"VIEWJOBS\",\r\n \"JOINJOB \",\r\n \"COMPLETEJOB \"]\r\n\r\n def __init__(self):\r\n self.ServerSocket = socket.socket()\r\n self.host = '127.0.0.1'\r\n self.port = 1233\r\n self.ThreadCount = 0\r\n self.jobListOBJ = JobList()\r\n self.fileRecordOBJ = FileRecord()\r\n self.jobCreatorList = []\r\n self.jobSeekerList = []\r\n self.command = \"\"\r\n self.parameterList = []\r\n self.readBackup()\r\n self.count = 0\r\n\r\n\r\n # Bind socket to port\r\n try:\r\n self.ServerSocket.bind((self.host, self.port))\r\n except socket.error as e:\r\n print(str(e))\r\n\r\n print('Waiting for a Connection..')\r\n self.ServerSocket.listen(5)\r\n\r\n def main(self):\r\n while True:\r\n Client, address = self.ServerSocket.accept()\r\n print('Connected to: ' + address[0] + ':' + str(address[1]))\r\n start_new_thread(self.threadedClient, (Client,))\r\n self.ThreadCount += 1\r\n print('Thread Number: ' + str(self.ThreadCount))\r\n\r\n #COMPLETE\r\n def threadedClient(self, connection):\r\n self.connectionMessage(connection)\r\n\r\n while True:\r\n # Limiting to 2048 Bytes\r\n clientMessage = connection.recv(2048)\r\n\r\n # Receiving Message From Client\r\n self.command = pickle.loads(clientMessage)\r\n\r\n self.ParseCommand(self.command)\r\n\r\n 
self.commandRouting(connection, self.parameterList)\r\n\r\n #COMPLETE\r\n def connectionMessage(self, connection):\r\n connection.send(pickle.dumps(self.initialConnectionMessage))\r\n\r\n #COMPLETE\r\n def ParseCommand(self, Command):\r\n self.parameterList = Command.split(\" \")\r\n\r\n #COMPLETE\r\n def login(self, connection, parameterList):\r\n if parameterList[3] == \"JobCreator\":\r\n self.jobCreatorList.append(JobCreator(parameterList[1], parameterList[2]))\r\n connection.send(pickle.dumps(self.jobCreatorCommandMessage))\r\n\r\n elif parameterList[3] == \"JobSeeker\":\r\n self.jobCreatorList.append(JobSeeker(parameterList[1], parameterList[2]))\r\n connection.send(pickle.dumps(self.jobSeekerCommandMessage))\r\n else:\r\n connection.send(pickle.dumps(\"Not a valid position\"))\r\n\r\n #COMPLETE\r\n def createJob(self, connection, parameterList):\r\n connection.send(pickle.dumps(\"Job has been created and added to the Job List\"))\r\n self.jobListOBJ.updateJobList(parameterList[1], parameterList[2], parameterList[3], parameterList[4],\r\n parameterList[5])\r\n\r\n self.fileRecordOBJ.updateJobListBackup(self.jobListOBJ.listofjobs)\r\n\r\n #COMPLETE\r\n def removeJob(self, connection, parameterList):\r\n for Job in self.jobListOBJ.listofjobs:\r\n if Job.jobParameters[0] == parameterList[1] and Job.jobParameters[1] == parameterList[2]:\r\n connection.send(pickle.dumps(Job.FullJob + \" has been removed from the Job List\"))\r\n self.jobListOBJ.listofjobs.remove(Job)\r\n else:\r\n connection.send(pickle.dumps(\"Entered Job Does Not Exist In Job List\"))\r\n\r\n #COMPLETE\r\n def viewJobs(self, connection):\r\n if len(self.jobListOBJ.listofjobs) == 0:\r\n connection.send(pickle.dumps(\"No Jobs Posted\"))\r\n else:\r\n try:\r\n connection.send(pickle.dumps(self.jobListOBJ.listofjobs))\r\n except EOFError:\r\n pass\r\n\r\n #COMPLETE\r\n def joinJob(self, connection, parameterList):\r\n count = 0\r\n for Job in self.jobListOBJ.listofjobs:\r\n count += 1\r\n\r\n if 
Job.jobParameters[0] == parameterList[1] and Job.jobParameters[1] == parameterList[\r\n 2] and Job.NumOfSeekers == \"Job Started\":\r\n connection.send(pickle.dumps(\"Job is full\"))\r\n break\r\n\r\n if count > len(self.jobListOBJ.listofjobs):\r\n connection.send(pickle.dumps(\"Entered Job Does Not Exist In Job List\"))\r\n break\r\n\r\n if Job.jobParameters[0] == parameterList[1] and Job.jobParameters[1] == parameterList[2] and int(\r\n Job.NumOfSeekers) != 0:\r\n connection.send(pickle.dumps(parameterList[3] + \" has joined: \" + Job.FullJob))\r\n Job.JobSeekerList.append(parameterList[3])\r\n Job.NumOfSeekers = int(Job.NumOfSeekers) - 1\r\n Job.NumOfSeekers = str(Job.NumOfSeekers)\r\n break\r\n\r\n #COMPLETE\r\n def startJob(self, connection, parameterList):\r\n count = 0\r\n for Job in self.jobListOBJ.listofjobs:\r\n count += 1\r\n if count > len(self.jobListOBJ.listofjobs):\r\n connection.send(pickle.dumps(\"Entered Job Does Not Exist In Job List\"))\r\n break\r\n\r\n if Job.jobParameters[0] == parameterList[1] and Job.jobParameters[1] == parameterList[2]:\r\n connection.send(pickle.dumps(Job.FullJob + \" has been started\"))\r\n Job.setNumOfSeekers(\"Job Started\")\r\n break\r\n\r\n #COMPLETE\r\n def completeJob(self, connection, parameterList):\r\n\r\n print(\"Sending Job Type To Client\")\r\n connection.send(pickle.dumps(parameterList[2]))\r\n print(\"Sending Target IP To Client (If Needed)\")\r\n connection.send(pickle.dumps(parameterList[3]))\r\n print(\"Sending Target Port To Client (If Needed)\")\r\n connection.send(pickle.dumps(parameterList[4]))\r\n\r\n print(\"Waiting For Response From Client\")\r\n\r\n #Limiting to 2048 Bytes\r\n clientOutput = connection.recv(2048)\r\n\r\n print(\"Received Response From Client\")\r\n\r\n #Receiving Message From Client\r\n clientCompletion = pickle.loads(clientOutput)\r\n\r\n print(\"Response From Client Saved\")\r\n\r\n #Recording Multi Lined Client Output\r\n if type(clientCompletion) == list:\r\n\r\n for hosts 
in clientCompletion:\r\n self.fileRecordOBJ.recordOutput(hosts)\r\n\r\n #Recording Single Lined Client Output\r\n else:\r\n self.fileRecordOBJ.recordOutput(clientCompletion)\r\n\r\n #COMPLETE\r\n def commandRouting(self, connection, parameterList):\r\n if parameterList[0] == \"LOGIN\":\r\n self.login(connection, parameterList)\r\n elif parameterList[0] == \"CREATEJOB\":\r\n self.createJob(connection, parameterList)\r\n elif parameterList[0] == \"REMOVEJOB\":\r\n self.removeJob(connection, parameterList)\r\n elif parameterList[0] == \"VIEWJOBS\":\r\n self.viewJobs(connection)\r\n elif parameterList[0] == \"JOINJOB\":\r\n self.joinJob(connection, parameterList)\r\n elif parameterList[0] == \"STARTJOB\":\r\n self.startJob(connection, parameterList)\r\n elif parameterList[0] == \"COMPLETEJOB\":\r\n self.completeJob(connection, parameterList)\r\n else:\r\n connection.send(pickle.dumps(\"Invalid Command\"))\r\n\r\n #COMPLETE\r\n def readBackup(self):\r\n try:\r\n backup = open(\"JobBackup.txt\", 'r')\r\n backupList = backup.readlines()\r\n\r\n for lines in backupList:\r\n self.ParseCommand(lines.rstrip('\\n'))\r\n self.jobListOBJ.updateJobList(self.parameterList[0], self.parameterList[1], self.parameterList[2],\r\n self.parameterList[3], self.parameterList[4])\r\n print(\"Jobs Have Been Restored\")\r\n except IOError:\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n s = Server()\r\n s.main()","sub_path":"Final Project/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"155650648","text":"# Purpose\n#\tCalculate barcode counts from read counts\n\n\n# !! NOTE !!\n# if inputting DNAbc and RNAbc, be sure to enter the DNAbc files first. \n\n\n# Usage \n#\tpython x2.collapdsed2counts.py ... \n\n# Arguments\n#\t \t\tdirectory of input data\n#\t\t\t\tname of sample (is used to name the job)\n# integer. Number of gigabytes of memory requested for computation\n# integer. Maximum computation time requested \n# email to send alerts to \n\n# Inputs\n# directory and filename of _collapsed.txt file (output of x1.collapseSeq.py)\n#\t\tthe next input file\n#\t!!! Note! x3.bc2enhancer.py assumes all DNAbc counts come before RNAbc counts. Therefore, input all DNAbc counts before RNAbc counts when calling this program. \t\n# Outputs\n# \tbarcode2reads.txt\t\ttsv file \n#\t\t\t\t\theader line = filenames inputted into analysis\n#\t\t\t\t\tcolumn1 = barcode\n#\t\t\t\t\tcolumn2-n = barcode counts [units = reads per million within each file]\n\n\n\n\n\nimport os\nimport sys\n\n# ensure all arguments passed\ntry:\n\toutputDir=sys.argv[1]+'/' \n\tjobName=sys.argv[2]\n\tmem=sys.argv[3]\n\tjobHours=sys.argv[4]\n\temail=sys.argv[5]\n\tinputList=sys.argv[6:]\n\tinputStr=' '.join(inputList)\nexcept IndexError:\n\tprint(\"Error: Not all arguments passed\")\n\n# Make dirs for submission scripts and batch stderr/stdout files to be saved\nos.system(\"mkdir \"+outputDir+\"stdout 2>/dev/null\")\nos.system(\"mkdir \"+outputDir+\"stderr 2>/dev/null\")\n\n# Script version to use\nRNAseq2reads='1.RNAseq2reads.py'\n\n# Create batch submit script\nline_out=\"#!/bin/bash\\n\"\nline_out+=\"#SBATCH --partition=shared\\n\"\nline_out+=\"#SBATCH --job-name=RNAseq2reads:\"+jobName+\"\\n\"\nline_out+=\"#SBATCH --nodes=1\\n\"\nline_out+=\"#SBATCH --ntasks-per-node=1\\n\"\nline_out+=\"#SBATCH --mem=\"+mem+\"G\\n\"\nline_out+=\"#SBATCH --time=\"+jobHours+\":00:00\\n\"\nline_out+=\"#SBATCH --output=\"+outputDir+\"stdout/\"+jobName+\".out.txt\\n\"\nline_out+=\"#SBATCH 
--error=\"+outputDir+\"stderr/\"+jobName+\".err.txt\\n\"\nline_out+=\"#SBATCH --export=ALL\\n\"\nline_out+=\"#SBATCH --mail-user=\"+email+\"\\n\"\nline_out+=\"#SBATCH --mail-type=ALL\\n\"\nline_out+=\"module load python\\n\" # load package numpy\nline_out+=\"module load scipy\\n\" # load package numpy\nline_out+=\"module load biopython\\n\" # load package biopython\nline_out+=\" \".join([\"python\",RNAseq2reads,outputDir]+inputList) \n\n# Write and submit batch script\nwith open(\"submit_RNAseq2reads.sh\",\"w\") as fn:\n\tfn.write(line_out)\nos.system(\"sbatch submit_RNAseq2reads.sh\")\n\n# Copy submit script\nos.system(\"mkdir \"+outputDir+\"submit-scripts 2>/dev/null\")\nos.system(\"cp submit_RNAseq2reads.sh \"+outputDir+\"submit-scripts/RNAseq2reads-\"+jobName+\".submit.sh\")\n\n# Copy script to data file\nos.system(\"mkdir \"+outputDir+\"scripts 2>/dev/null\")\nos.system(' '.join([\"cp\",RNAseq2reads,outputDir+\"scripts/\"+RNAseq2reads]))\n","sub_path":"RNA_library/x2.collapsed2counts.py","file_name":"x2.collapsed2counts.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"601161077","text":"from numba import njit\nfrom numba.typed import List\nimport numpy as np\nfrom ..utils.stack_txt_LR import *\n\n# @njit\ndef get_semicircle(txt,deg,x0,y0):\n #make the initialization mesh\n img = 0*txt[...,0].copy()\n color_left_of_line(out=img, x0=x0, y0=y0, deg = deg, value=1.) #make left of line mesh 0 or 1\n color_outside_range(out=img, x0=x0,y0=y0,r=64, val=0.0) #make circle mesh 0 or 1\n #already done: hadamard product of the two.\n return img\n\n@njit\ndef color_within_range(x0,y0,r, out, val=1.0, width=512,height=512):\n\tfor x in range(width):\n\t\tdx = x-x0\n\t\tfor y in range(height):\n\t\t\tdy = y-y0\n\t\t\tif np.sqrt(dx**2+dy**2)<=r:\n\t\t\t\tout[y,x] = val\n@njit\ndef color_outside_range(x0,y0,r, out, val=0.0):\n width = out.shape[0]\n height = out.shape[1]\n for x in range(width):\n dx = x-x0\n for y in range(height):\n dy = y-y0\n if np.sqrt(dx**2+dy**2)>r:\n out[y,x] = val\n\n\n@njit\ndef make_coordinate_textures(txt):\n txtx = txt.copy()\n txty = txt.copy()\n for y in range(txt.shape[0]):\n for x in range(txt.shape[1]):\n txtx[x,y] = x\n txty[x,y] = y\n return txtx, txty\n\n# @njit #njit crashes rn\ndef color_left_of_line(out, x0, y0, deg = 45, value=10.):\n width = out.shape[1]\n x0 = int(x0)\n y0 = int(y0)\n for y in range(out.shape[0]):\n l = linear_interpolate_row_to_column(y, x0=x0, y0=y0, deg = deg)\n for x in range(width):\n if x\"\n html = html+\"Summary of keywords matched per province - \"+pdate+\"
\"+summary+\"
See attached CSV file for full details.